Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE

This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e
and updates the x64 makefile properly so that the revert does not
break that build.

FPIIM-449

Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
diff --git a/build/android/AndroidManifest.xml b/build/android/AndroidManifest.xml
new file mode 100644
index 0000000..e1e2904
--- /dev/null
+++ b/build/android/AndroidManifest.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+  Copyright (c) 2012 The Chromium Authors. All rights reserved.  Use of this
+  source code is governed by a BSD-style license that can be found in the
+  LICENSE file.
+-->
+
+<!--
+  This is a dummy manifest which is required by:
+  1. aapt when generating R.java in java.gypi:
+     Nothing in the manifest is used, but it is still required by aapt.
+  2. lint: [min|target]SdkVersion are required by lint and should
+     be kept up-to-date.
+-->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+    package="dummy.package">
+
+    <uses-sdk android:minSdkVersion="16" android:targetSdkVersion="23" />
+
+</manifest>
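
A note on how this manifest is consumed: aapt only needs a syntactically
valid manifest when generating R.java, so nothing here is packaged. A
minimal sketch of the kind of aapt invocation java.gypi drives (the res/
and gen/ paths and the android-23 platform jar are illustrative
assumptions, not taken from this change):

  # Hypothetical aapt call; only the -M manifest argument matters here.
  aapt package -m \
      -M build/android/AndroidManifest.xml \
      -S path/to/res \
      -I third_party/android_tools/sdk/platforms/android-23/android.jar \
      -J path/to/gen
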
diff --git a/build/android/BUILD.gn b/build/android/BUILD.gn
new file mode 100644
index 0000000..9ee562c
--- /dev/null
+++ b/build/android/BUILD.gn
@@ -0,0 +1,137 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/android/rules.gni")
+import("//third_party/ijar/ijar.gni")
+
+sun_tools_jar_path = "$root_gen_dir/sun_tools_jar/tools.jar"
+
+# Create or update the API versions cache if necessary by running a
+# functionally empty lint task. This prevents racy creation of the
+# cache while linting java targets in android_lint.
+android_lint("prepare_android_lint_cache") {
+  android_manifest = "//build/android/AndroidManifest.xml"
+  create_cache = true
+}
+
+action("find_sun_tools_jar") {
+  script = "//build/android/gyp/find_sun_tools_jar.py"
+  depfile = "$target_gen_dir/$target_name.d"
+  outputs = [
+    depfile,
+    sun_tools_jar_path,
+  ]
+  args = [
+    "--depfile",
+    rebase_path(depfile, root_build_dir),
+    "--output",
+    rebase_path(sun_tools_jar_path, root_build_dir),
+  ]
+}
+
+java_prebuilt("sun_tools_java") {
+  jar_path = sun_tools_jar_path
+  jar_dep = ":find_sun_tools_jar"
+}
+
+generate_interface_jar("android_ijar") {
+  input_jar = android_sdk_jar
+  output_jar = "$root_out_dir/lib.java/android.interface.jar"
+}
+
+# Copy to the lib.unstripped directory so that gdb can easily find it.
+copy("cpplib_unstripped") {
+  _soname = "libc++_shared.so"
+  sources = [
+    "${android_libcpp_lib_dir}/${_soname}",
+  ]
+  outputs = [
+    "${root_out_dir}/lib.unstripped/${_soname}",
+  ]
+}
+
+action("cpplib_stripped") {
+  _strip_bin = "${android_tool_prefix}strip"
+  _soname = "libc++_shared.so"
+  _input_so = "${root_out_dir}/lib.unstripped/${_soname}"
+  _output_so = "${root_shlib_dir}/${_soname}"
+
+  deps = [
+    ":cpplib_unstripped",
+  ]
+
+  script = "//build/gn_run_binary.py"
+  inputs = [
+    _strip_bin,
+  ]
+  sources = [
+    _input_so,
+  ]
+  outputs = [
+    _output_so,
+  ]
+
+  _rebased_strip_bin = rebase_path(_strip_bin, root_out_dir)
+  _rebased_input_so = rebase_path(_input_so, root_out_dir)
+  _rebased_output_so = rebase_path(_output_so, root_out_dir)
+  args = [
+    _rebased_strip_bin,
+    "--strip-unneeded",
+    "-o",
+    _rebased_output_so,
+    _rebased_input_so,
+  ]
+}
+
+group("test_runner_py") {
+  _py_files = read_file("test_runner.pydeps", "list lines")
+
+  # Filter out comments.
+  set_sources_assignment_filter([ "#*" ])
+  sources = _py_files
+
+  data = sources + [
+           "devil_chromium.json",
+           "pylib/gtest/filter/",
+           "//third_party/android_tools/sdk/build-tools/23.0.1/aapt",
+           "//third_party/android_tools/sdk/build-tools/23.0.1/dexdump",
+           "//third_party/android_tools/sdk/build-tools/23.0.1/lib/libc++.so",
+           "//third_party/android_tools/sdk/build-tools/23.0.1/split-select",
+           "//third_party/android_tools/sdk/platform-tools/adb",
+           "//third_party/catapult/third_party/gsutil/",
+           "//third_party/catapult/devil/devil/devil_dependencies.json",
+           "//third_party/proguard/lib/proguard.jar",
+         ]
+  data_deps = [
+    "//tools/swarming_client:isolate_py",
+  ]
+}
+
+# Create wrapper scripts in out/bin that take care of setting the
+# --output-directory.
+_scripts_to_wrap = [
+  # TODO(agrieve): Once GYP is no more, delete the checked-in adb_gdb_* scripts
+  # and generate a script for each android_apk() that has a native library.
+  "adb_gdb_android_webview_shell",
+  "adb_gdb_blimp_client",
+  "adb_gdb_chrome_public",
+  "adb_gdb_content_shell",
+  "adb_gdb_cronet_sample",
+  "adb_gdb_mojo_shell",
+  "asan_symbolize.py",
+  "tombstones.py",
+]
+
+_wrapper_targets = []
+foreach(script, _scripts_to_wrap) {
+  _target_name = get_path_info(script, "name") + "_wrapper"
+  _wrapper_targets += [ ":$_target_name" ]
+  wrapper_script(_target_name) {
+    target = script
+  }
+}
+
+group("wrapper_scripts") {
+  deps = _wrapper_targets
+}
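
As a rough usage sketch for the wrapper targets above, assuming a GN
output directory at out/Default (the session is illustrative, not part
of this change):

  # Build the wrappers; they land under out/Default/bin and invoke the
  # corresponding build/android script with --output-directory preset.
  ninja -C out/Default wrapper_scripts
  ls out/Default/bin
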
diff --git a/build/android/CheckInstallApk-debug.apk b/build/android/CheckInstallApk-debug.apk
new file mode 100644
index 0000000..3dc3191
--- /dev/null
+++ b/build/android/CheckInstallApk-debug.apk
Binary files differ
diff --git a/build/android/OWNERS b/build/android/OWNERS
new file mode 100644
index 0000000..13e19f5
--- /dev/null
+++ b/build/android/OWNERS
@@ -0,0 +1,5 @@
+jbudorick@chromium.org
+mikecase@chromium.org
+pasko@chromium.org
+perezju@chromium.org
+rnephew@chromium.org
diff --git a/build/android/PRESUBMIT.py b/build/android/PRESUBMIT.py
new file mode 100644
index 0000000..210acf9
--- /dev/null
+++ b/build/android/PRESUBMIT.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Presubmit script for android buildbot.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
+details on the presubmit API built into depot_tools.
+"""
+
+
+def CommonChecks(input_api, output_api):
+  output = []
+
+  build_android_dir = input_api.PresubmitLocalPath()
+
+  def J(*dirs):
+    """Returns a path relative to presubmit directory."""
+    return input_api.os_path.join(build_android_dir, *dirs)
+
+  build_pys = [
+      r'gyp/.*\.py$',
+      r'gn/.*\.py$',
+      r'incremental_install/.*\.py$',
+  ]
+  output.extend(input_api.canned_checks.RunPylint(
+      input_api,
+      output_api,
+      pylintrc='pylintrc',
+      black_list=build_pys,
+      extra_paths_list=[
+          J(),
+          J('buildbot'),
+          J('..', '..', 'third_party', 'catapult', 'devil')
+      ]))
+  output.extend(input_api.canned_checks.RunPylint(
+      input_api,
+      output_api,
+      white_list=build_pys,
+      extra_paths_list=[J('gyp'), J('gn')]))
+
+  # Disabled due to http://crbug.com/410936
+  #output.extend(input_api.canned_checks.RunUnitTestsInDirectory(
+  #input_api, output_api, J('buildbot', 'tests')))
+
+  pylib_test_env = dict(input_api.environ)
+  pylib_test_env.update({
+      'PYTHONPATH': build_android_dir,
+      'PYTHONDONTWRITEBYTECODE': '1',
+  })
+  output.extend(input_api.canned_checks.RunUnitTests(
+      input_api,
+      output_api,
+      unit_tests=[
+          J('.', 'emma_coverage_stats_test.py'),
+          J('gyp', 'util', 'md5_check_test.py'),
+          J('play_services', 'update_test.py'),
+          J('pylib', 'base', 'test_dispatcher_unittest.py'),
+          J('pylib', 'gtest', 'gtest_test_instance_test.py'),
+          J('pylib', 'instrumentation',
+            'instrumentation_test_instance_test.py'),
+          J('pylib', 'results', 'json_results_test.py'),
+          J('pylib', 'symbols', 'elf_symbolizer_unittest.py'),
+      ],
+      env=pylib_test_env))
+
+  return output
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  return CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  return CommonChecks(input_api, output_api)
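
These checks run automatically on upload and commit. A sketch of
exercising them by hand, assuming depot_tools is on PATH:

  # Run the presubmit checks for the current CL without uploading.
  git cl presubmit
  # Or run one of the listed unit tests directly (may need
  # PYTHONPATH=build/android, mirroring pylib_test_env above).
  python build/android/pylib/results/json_results_test.py
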
diff --git a/build/android/adb_android_webview_command_line b/build/android/adb_android_webview_command_line
new file mode 100755
index 0000000..9075918
--- /dev/null
+++ b/build/android/adb_android_webview_command_line
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# If no flags are given, prints the current content shell flags.
+#
+# Otherwise, the given flags are used to REPLACE (not modify) the content shell
+# flags. For example:
+#   adb_android_webview_command_line --enable-webgl
+#
+# To remove all content shell flags, pass an empty string for the flags:
+#   adb_android_webview_command_line ""
+
+exec $(dirname $0)/adb_command_line.py --device-path \
+    /data/local/tmp/android-webview-command-line "$@"
diff --git a/build/android/adb_blimp_command_line b/build/android/adb_blimp_command_line
new file mode 100755
index 0000000..1ff3769
--- /dev/null
+++ b/build/android/adb_blimp_command_line
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# If no flags are given, prints the current Blimp flags.
+#
+# Otherwise, the given flags are used to REPLACE (not modify) the Blimp
+# flags. For example:
+#   adb_blimp_command_line --enable-webgl
+#
+# To remove all Blimp flags, pass an empty string for the flags:
+#   adb_blimp_command_line ""
+
+exec $(dirname $0)/adb_command_line.py --device-path \
+    /data/local/blimp-command-line "$@"
diff --git a/build/android/adb_cast_shell_command_line b/build/android/adb_cast_shell_command_line
new file mode 100755
index 0000000..bcbcbeb
--- /dev/null
+++ b/build/android/adb_cast_shell_command_line
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# If no flags are given, prints the current cast shell flags.
+#
+# Otherwise, the given flags are used to REPLACE (not modify) the cast shell
+# flags. For example:
+#   adb_cast_shell_command_line --enable-media-thread-for-media-playback
+#
+# If multiple devices are connected, use the --device argument to specify the
+# device ID. You can use
+#   adb devices
+# ... to find the device's ID.
+#
+# To remove all content shell flags, pass an empty string for the flags:
+#   apk_command_line ""
+
+exec $(dirname $0)/../../build/android/adb_command_line.py -e cast_shell \
+  --device-path /data/local/tmp/castshell-command-line "$@"
diff --git a/build/android/adb_chrome_public_command_line b/build/android/adb_chrome_public_command_line
new file mode 100755
index 0000000..ac379e8
--- /dev/null
+++ b/build/android/adb_chrome_public_command_line
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# If no flags are given, prints the current Chrome flags.
+#
+# Otherwise, the given flags are used to REPLACE (not modify) the Chrome
+# flags. For example:
+#   adb_chrome_public_command_line --enable-webgl
+#
+# To remove all Chrome flags, pass an empty string for the flags:
+#   adb_chrome_public_command_line ""
+
+exec $(dirname $0)/adb_command_line.py --device-path \
+    /data/local/chrome-command-line "$@"
diff --git a/build/android/adb_command_line.py b/build/android/adb_command_line.py
new file mode 100755
index 0000000..72f42b6
--- /dev/null
+++ b/build/android/adb_command_line.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utility for reading / writing command-line flag files on device(s)."""
+
+import argparse
+import sys
+
+import devil_chromium
+
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.utils import cmd_helper
+
+
+def main():
+  parser = argparse.ArgumentParser(description=__doc__)
+  parser.usage = '''%(prog)s --device-path PATH [--device SERIAL] [flags...]
+
+No flags: Prints existing command-line file.
+Empty string: Deletes command-line file.
+Otherwise: Writes command-line file.
+
+'''
+  parser.add_argument('-d', '--device', dest='devices', action='append',
+                      default=[], help='Target device serial (repeatable).')
+  parser.add_argument('--device-path', required=True,
+                      help='Remote path to flags file.')
+  parser.add_argument('-e', '--executable', dest='executable', default='chrome',
+                      help='Name of the executable.')
+  args, remote_args = parser.parse_known_args()
+
+  devil_chromium.Initialize()
+
+  as_root = not args.device_path.startswith('/data/local/tmp/')
+
+  devices = device_utils.DeviceUtils.HealthyDevices(device_arg=args.devices,
+                                                    default_retries=0)
+  all_devices = device_utils.DeviceUtils.parallel(devices)
+
+  def print_args():
+    def read_flags(device):
+      try:
+        return device.ReadFile(args.device_path, as_root=as_root).rstrip()
+      except device_errors.AdbCommandFailedError:
+        return ''  # File might not exist.
+
+    descriptions = all_devices.pMap(lambda d: d.build_description).pGet(None)
+    flags = all_devices.pMap(read_flags).pGet(None)
+    for d, desc, device_flags in zip(devices, descriptions, flags):
+      print '  %s (%s): %r' % (d, desc, device_flags)
+
+  # No args == print flags.
+  if not remote_args:
+    print 'Existing flags (in %s):' % args.device_path
+    print_args()
+    return 0
+
+  # Empty string arg == delete flags file.
+  if len(remote_args) == 1 and not remote_args[0]:
+    def delete_flags(device):
+      device.RunShellCommand(['rm', '-f', args.device_path], as_root=as_root)
+    all_devices.pMap(delete_flags).pGet(None)
+    print 'Deleted %s' % args.device_path
+    return 0
+
+  # Set flags.
+  quoted_args = ' '.join(cmd_helper.SingleQuote(x) for x in remote_args)
+  flags_str = ' '.join([args.executable, quoted_args])
+
+  def write_flags(device):
+    device.WriteFile(args.device_path, flags_str, as_root=as_root)
+    device.RunShellCommand(['chmod', '0664', args.device_path], as_root=as_root)
+
+  all_devices.pMap(write_flags).pGet(None)
+  print 'Wrote flags to %s' % args.device_path
+  print_args()
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
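
A usage sketch of the three modes described in the usage string, using
the content shell flags path that the wrapper below passes (device
serials and flags are illustrative):

  # No extra args: print the existing flags file on each device.
  build/android/adb_command_line.py \
      --device-path /data/local/tmp/content-shell-command-line
  # Flags given: write the file ('chrome' is prepended by default).
  build/android/adb_command_line.py \
      --device-path /data/local/tmp/content-shell-command-line --enable-webgl
  # A single empty string: delete the flags file.
  build/android/adb_command_line.py \
      --device-path /data/local/tmp/content-shell-command-line ''
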
diff --git a/build/android/adb_content_shell_command_line b/build/android/adb_content_shell_command_line
new file mode 100755
index 0000000..02ef802
--- /dev/null
+++ b/build/android/adb_content_shell_command_line
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# If no flags are given, prints the current content shell flags.
+#
+# Otherwise, the given flags are used to REPLACE (not modify) the content shell
+# flags. For example:
+#   adb_content_shell_command_line --enable-webgl
+#
+# To remove all content shell flags, pass an empty string for the flags:
+#   adb_content_shell_command_line ""
+
+exec $(dirname $0)/adb_command_line.py --device-path \
+    /data/local/tmp/content-shell-command-line "$@"
diff --git a/build/android/adb_device_functions.sh b/build/android/adb_device_functions.sh
new file mode 100755
index 0000000..66cc32f
--- /dev/null
+++ b/build/android/adb_device_functions.sh
@@ -0,0 +1,139 @@
+#!/bin/bash
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# A collection of functions useful for maintaining android devices
+
+
+# Run an adb command on all connected devices in parallel.
+# Usage: adb_all command line to eval.  Quoting is optional.
+#
+# Examples:
+#  adb_all install Chrome.apk
+#  adb_all 'shell cat /path/to/file'
+#
+adb_all() {
+  if [[ $# == 0 ]]; then
+    echo "Usage: adb_all <adb command>.  Quoting is optional."
+    echo "Example: adb_all install Chrome.apk"
+    return 1
+  fi
+  local DEVICES=$(adb_get_devices -b)
+  local NUM_DEVICES=$(echo $DEVICES | wc -w)
+  if (( $NUM_DEVICES > 1 )); then
+    echo "Looping over $NUM_DEVICES devices"
+  fi
+  _adb_multi "$DEVICES" "$*"
+}
+
+
+# Run a command on each connected device.  Quoting the command is suggested but
+# not required.  The script sets up the variable DEVICE to correspond to the
+# current serial number.  Intended for complex one-liners that don't work in
+# adb_all.
+# Usage: adb_device_loop 'command line to eval'
+adb_device_loop() {
+  if [[ $# == 0 ]]; then
+    echo "Intended for more complex one-liners that cannot be done with" \
+        "adb_all."
+    echo 'Usage: adb_device_loop "echo $DEVICE: $(adb root &&' \
+        'adb shell cat /data/local.prop)"'
+    return 1
+  fi
+  local DEVICES=$(adb_get_devices)
+  if [[ -z $DEVICES ]]; then
+    return
+  fi
+  # Do not change DEVICE variable name - part of api
+  for DEVICE in $DEVICES; do
+    DEV_TYPE=$(adb -s $DEVICE shell getprop ro.product.device | sed 's/\r//')
+    echo "Running on $DEVICE ($DEV_TYPE)"
+    ANDROID_SERIAL=$DEVICE eval "$*"
+  done
+}
+
+# Erases data from any devices visible on adb.  To preserve a device,
+# disconnect it or:
+#  1) Reboot it into fastboot with 'adb reboot bootloader'
+#  2) Run wipe_all_devices to wipe remaining devices
+#  3) Restore it with 'fastboot reboot'
+#
+#  Usage: wipe_all_devices [-f]
+#
+wipe_all_devices() {
+  if [[ -z $(which adb) || -z $(which fastboot) ]]; then
+    echo "aborting: adb and fastboot not in path"
+    return 1
+  elif ! groups | grep -q 'plugdev'; then
+    echo "If fastboot fails, run: 'sudo adduser $(whoami) plugdev'"
+  fi
+
+  local DEVICES=$(adb_get_devices -b)
+
+  if [[ $1 != '-f' ]]; then
+    echo "This will ERASE ALL DATA from $(echo $DEVICES | wc -w) device."
+    read -p "Hit enter to continue"
+  fi
+
+  _adb_multi "$DEVICES" "reboot bootloader"
+  # Subshell to isolate job list
+  (
+  for DEVICE in $DEVICES; do
+    fastboot_erase $DEVICE &
+  done
+  wait
+  )
+
+  # Reboot devices together
+  for DEVICE in $DEVICES; do
+    fastboot -s $DEVICE reboot
+  done
+}
+
+# Wipe a device in fastboot.
+# Usage: fastboot_erase [serial]
+fastboot_erase() {
+  if [[ -n $1 ]]; then
+    echo "Wiping $1"
+    local SERIAL="-s $1"
+  else
+    if [ -z "$(fastboot devices)" ]; then
+      echo "No devices in fastboot, aborting."
+      echo "Check out wipe_all_devices to see if it is sufficient."
+      echo "You can put a device in fastboot using 'adb reboot bootloader'."
+      return 1
+    fi
+    local SERIAL=""
+  fi
+  fastboot $SERIAL erase cache
+  fastboot $SERIAL erase userdata
+}
+
+# Get list of devices connected via adb
+# Args: -b block until adb detects a device
+adb_get_devices() {
+  local DEVICES="$(adb devices | grep 'device$')"
+  if [[ -z $DEVICES && $1 == '-b' ]]; then
+    echo '- waiting for device -' >&2
+    local DEVICES="$(adb wait-for-device devices | grep 'device$')"
+  fi
+  echo "$DEVICES" | awk -vORS=' ' '{print $1}' | sed 's/ $/\n/'
+}
+
+###################################################
+## HELPER FUNCTIONS
+###################################################
+
+# Run an adb command in parallel over a device list
+_adb_multi() {
+  local DEVICES=$1
+  local ADB_ARGS=$2
+  (
+    for DEVICE in $DEVICES; do
+      adb -s $DEVICE $ADB_ARGS &
+    done
+    wait
+  )
+}
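
These helpers are meant to be sourced into an interactive shell rather
than executed. A brief illustrative session:

  . build/android/adb_device_functions.sh
  # Run one adb command on every connected device in parallel.
  adb_all shell getprop ro.build.version.release
  # Run a compound command per device; DEVICE is set by the loop.
  adb_device_loop 'echo $DEVICE: $(adb shell date)'
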
diff --git a/build/android/adb_gdb b/build/android/adb_gdb
new file mode 100755
index 0000000..00c4f89
--- /dev/null
+++ b/build/android/adb_gdb
@@ -0,0 +1,1040 @@
+#!/bin/bash
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+# A generic script used to attach to a running Chromium process and
+# debug it. Most users should not use this directly, but rather one of
+# the wrapper scripts like adb_gdb_content_shell.
+#
+# Use --help to print full usage instructions.
+#
+
+PROGNAME=$(basename "$0")
+PROGDIR=$(dirname "$0")
+
+# Force locale to C to allow recognizing output from subprocesses.
+export LC_ALL=C
+
+# Location of Chromium-top-level sources.
+CHROMIUM_SRC=$(cd "$PROGDIR"/../.. >/dev/null && pwd 2>/dev/null)
+
+TMPDIR=
+GDBSERVER_PIDFILE=
+TARGET_GDBSERVER=
+COMMAND_PREFIX=
+
+clean_exit () {
+  if [ "$TMPDIR" ]; then
+    GDBSERVER_PID=$(cat $GDBSERVER_PIDFILE 2>/dev/null)
+    if [ "$GDBSERVER_PID" ]; then
+      log "Killing background gdbserver process: $GDBSERVER_PID"
+      kill -9 $GDBSERVER_PID >/dev/null 2>&1
+    fi
+    if [ "$TARGET_GDBSERVER" ]; then
+      log "Removing target gdbserver binary: $TARGET_GDBSERVER."
+      "$ADB" shell "$COMMAND_PREFIX" rm "$TARGET_GDBSERVER" >/dev/null 2>&1
+    fi
+    log "Cleaning up: $TMPDIR"
+    rm -rf "$TMPDIR"
+  fi
+  trap "" EXIT
+  exit $1
+}
+
+# Ensure clean exit on Ctrl-C or normal exit.
+trap "clean_exit 1" INT HUP QUIT TERM
+trap "clean_exit \$?" EXIT
+
+panic () {
+  echo "ERROR: $@" >&2
+  exit 1
+}
+
+fail_panic () {
+  if [ $? != 0 ]; then panic "$@"; fi
+}
+
+log () {
+  if [ "$VERBOSE" -gt 0 ]; then
+    echo "$@"
+  fi
+}
+
+DEFAULT_PULL_LIBS_DIR=/tmp/$USER-adb-gdb-libs
+
+# NOTE: Allow wrapper scripts to set various defaults through ADB_GDB_XXX
+# environment variables. This is only for cosmetic reasons, i.e. to
+# display proper defaults in the --help output.
+
+# Allow wrapper scripts to set the default activity through
+# the ADB_GDB_ACTIVITY variable. Users are still able to change the
+# final activity name through --activity=<name> option.
+#
+# This is only for cosmetic reasons, i.e. to display the proper default
+# in the --help output.
+#
+DEFAULT_ACTIVITY=${ADB_GDB_ACTIVITY:-".Main"}
+
+# Allow wrapper scripts to set the program name through ADB_GDB_PROGNAME
+PROGNAME=${ADB_GDB_PROGNAME:-$(basename "$0")}
+
+ACTIVITY=$DEFAULT_ACTIVITY
+ADB=
+ANNOTATE=
+FORCE=
+GDBEXEPOSTFIX=gdb
+GDBINIT=
+GDBSERVER=
+HELP=
+NDK_DIR=
+NO_PULL_LIBS=
+PACKAGE_NAME=
+PID=
+PORT=
+PRIVILEGED=
+PRIVILEGED_INDEX=
+PROGRAM_NAME="activity"
+PULL_LIBS=
+PULL_LIBS_DIR=
+SANDBOXED=
+SANDBOXED_INDEX=
+START=
+START_URL=
+ATTACH_DELAY=1
+SU_PREFIX=
+SYMBOL_DIR=
+TARGET_ARCH=
+TOOLCHAIN=
+VERBOSE=0
+
+for opt; do
+  optarg=$(expr "x$opt" : 'x[^=]*=\(.*\)')
+  case $opt in
+    --adb=*)
+      ADB=$optarg
+      ;;
+    --device=*)
+      export ANDROID_SERIAL=$optarg
+      ;;
+    --activity=*)
+      ACTIVITY=$optarg
+      ;;
+    --annotate=*)
+      ANNOTATE=$optarg
+      ;;
+    --force)
+      FORCE=true
+      ;;
+    --gdbserver=*)
+      GDBSERVER=$optarg
+      ;;
+    --gdb=*)
+      GDB=$optarg
+      ;;
+    --help|-h|-?)
+      HELP=true
+      ;;
+    --ndk-dir=*)
+      NDK_DIR=$optarg
+      ;;
+    --no-pull-libs)
+      NO_PULL_LIBS=true
+      ;;
+    --package-name=*)
+      PACKAGE_NAME=$optarg
+      ;;
+    --pid=*)
+      PID=$optarg
+      ;;
+    --port=*)
+      PORT=$optarg
+      ;;
+    --privileged)
+      PRIVILEGED=true
+      ;;
+    --privileged=*)
+      PRIVILEGED=true
+      PRIVILEGED_INDEX=$optarg
+      ;;
+    --program-name=*)
+      PROGRAM_NAME=$optarg
+      ;;
+    --pull-libs)
+      PULL_LIBS=true
+      ;;
+    --pull-libs-dir=*)
+      PULL_LIBS_DIR=$optarg
+      ;;
+    --sandboxed)
+      SANDBOXED=true
+      ;;
+    --sandboxed=*)
+      SANDBOXED=true
+      SANDBOXED_INDEX=$optarg
+      ;;
+    --script=*)
+      GDBINIT=$optarg
+      ;;
+    --start=*)
+      START_URL=$optarg
+      ;& # fallthrough
+    --start)
+      START=true
+      ;;
+    --attach-delay=*)
+      ATTACH_DELAY=$optarg
+      ;;
+    --su-prefix=*)
+      SU_PREFIX=$optarg
+      ;;
+    --symbol-dir=*)
+      SYMBOL_DIR=$optarg
+      ;;
+    --output-directory=*)
+      CHROMIUM_OUTPUT_DIR=$optarg
+      ;;
+    --target-arch=*)
+      TARGET_ARCH=$optarg
+      ;;
+    --toolchain=*)
+      TOOLCHAIN=$optarg
+      ;;
+    --ui)
+      GDBEXEPOSTFIX=gdbtui
+      ;;
+    --verbose)
+      VERBOSE=$(( $VERBOSE + 1 ))
+      ;;
+    -*)
+      panic "Unknown option $opt, see --help." >&2
+      ;;
+    *)
+      if [ "$PACKAGE_NAME" ]; then
+        panic "You can only provide a single package name as argument!\
+ See --help."
+      fi
+      PACKAGE_NAME=$opt
+      ;;
+  esac
+done
+
+if [ "$HELP" ]; then
+  if [ "$ADB_GDB_PROGNAME" ]; then
+    # Assume wrapper scripts all provide a default package name.
+    cat <<EOF
+Usage: $PROGNAME [options]
+
+Attach gdb to a running Android $PROGRAM_NAME process.
+EOF
+  else
+    # Assume this is a direct call to adb_gdb
+    cat <<EOF
+Usage: $PROGNAME [options] [<package-name>]
+
+Attach gdb to a running Android $PROGRAM_NAME process.
+
+If provided, <package-name> must be the package name of the Android
+application to be debugged. You can also use --package-name=<name> to
+specify it.
+EOF
+  fi
+
+  cat <<EOF
+
+This script is used to debug a running $PROGRAM_NAME process.
+This can be a regular Android application process, sandboxed (if you use the
+--sandboxed or --sandboxed=<num> option) or a privileged (--privileged or
+--privileged=<num>) service.
+
+This script needs several things to work properly. It will try to pick
+them up automatically for you though:
+
+   - target gdbserver binary
+   - host gdb client (e.g. arm-linux-androideabi-gdb)
+   - directory with symbolic version of $PROGRAM_NAME's shared libraries.
+
+You can also use --ndk-dir=<path> to specify an alternative NDK installation
+directory.
+
+The script tries to find the most recent debug versions of the shared
+libraries under one of the following directories:
+
+  \$CHROMIUM_SRC/<out>/lib/                (used by GYP builds)
+  \$CHROMIUM_SRC/<out>/lib.unstripped/     (used by GN builds)
+
+Where <out> is determined by CHROMIUM_OUTPUT_DIR, or --output-directory.
+
+You can set the path manually via --symbol-dir.
+
+The script tries to extract the target architecture from your target device,
+but if this fails, it will default to 'arm'. Use --target-arch=<name> to force
+its value.
+
+Otherwise, the script will complain, but you can use the --gdbserver,
+--gdb and --symbol-dir options to specify everything manually.
+
+An alternative to --gdb=<file> is to use --toolchain=<path> to specify
+the path to the host target-specific cross-toolchain.
+
+You will also need the 'adb' tool in your path. Otherwise, use the --adb
+option. The script will complain if there is more than one device connected
+and a device is not specified with either --device or ANDROID_SERIAL.
+
+The first time you use it on a device, the script will pull many system
+libraries required by the process into a temporary directory. This
+is done to strongly improve the debugging experience, like allowing
+readable thread stacks and more. The libraries are copied to the following
+directory by default:
+
+  $DEFAULT_PULL_LIBS_DIR/
+
+But you can use the --pull-libs-dir=<path> option to specify an
+alternative. The script can detect when you change the connected device,
+and will re-pull the libraries only in this case. You can however force it
+with the --pull-libs option.
+
+Any local .gdbinit script will be ignored, but it is possible to pass a
+gdb command script with the --script=<file> option. Note that its commands
+will be passed to gdb after the remote connection and library symbol
+loading have completed.
+
+Valid options:
+  --help|-h|-?          Print this message.
+  --verbose             Increase verbosity.
+
+  --sandboxed           Debug first sandboxed process we find.
+  --sandboxed=<num>     Debug specific sandboxed process.
+  --symbol-dir=<path>   Specify directory with symbol shared libraries.
+  --output-directory=<path> Specify the output directory (e.g. "out/Debug").
+  --package-name=<name> Specify package name (alternative to 1st argument).
+  --privileged          Debug first privileged process we find.
+  --privileged=<num>    Debug specific privileged process.
+  --program-name=<name> Specify program name (cosmetic only).
+  --pid=<pid>           Specify application process pid.
+  --force               Kill any previous debugging session, if any.
+  --start[=<url>]       Start package's activity on device.
+  --attach-delay=<num>  Seconds to wait for gdbserver to attach to the
+                        remote process before starting gdb. Default 1.
+                        <num> may be a float if your sleep(1) supports it.
+  --ui                  Use gdbtui instead of gdb
+  --activity=<name>     Activity name for --start [$DEFAULT_ACTIVITY].
+  --annotate=<num>      Enable gdb annotation.
+  --script=<file>       Specify extra GDB init script.
+
+  --gdbserver=<file>    Specify target gdbserver binary.
+  --gdb=<file>          Specify host gdb client binary.
+  --target-arch=<name>  Specify NDK target arch.
+  --adb=<file>          Specify host ADB binary.
+  --device=<serial>     ADB device serial to use (-s flag).
+  --port=<port>         Specify the TCP port to use.
+
+  --su-prefix=<prefix>  Prepend <prefix> to 'adb shell' commands that are
+                        run by this script. This can be useful to use
+                        the 'su' program on rooted production devices.
+                        e.g. --su-prefix="su -c"
+
+  --pull-libs           Force system libraries extraction.
+  --no-pull-libs        Do not extract any system library.
+  --pull-libs-dir=<path> Specify system libraries extraction directory.
+
+EOF
+  exit 0
+fi
+
+if [ -z "$PACKAGE_NAME" ]; then
+  panic "Please specify a package name on the command line. See --help."
+fi
+
+if [[ -z "$SYMBOL_DIR" && -z "$CHROMIUM_OUTPUT_DIR" ]]; then
+  if [[ -e "build.ninja" ]]; then
+    CHROMIUM_OUTPUT_DIR=$PWD
+  else
+    panic "Please specify an output directory by using one of:
+       --output-directory=out/Debug
+       CHROMIUM_OUTPUT_DIR=out/Debug
+       Setting working directory to an output directory.
+       See --help."
+  fi
+fi
+
+# Detect the build type and symbol directory. This is done by finding
+# the most recent sub-directory containing debug shared libraries under
+# $CHROMIUM_OUTPUT_DIR.
+#
+# Out: nothing, but this sets SYMBOL_DIR
+#
+detect_symbol_dir () {
+  # GYP places unstripped libraries under out/lib
+  # GN places them under out/lib.unstripped
+  local PARENT_DIR="$CHROMIUM_OUTPUT_DIR"
+  if [[ ! -e "$PARENT_DIR" ]]; then
+    PARENT_DIR="$CHROMIUM_SRC/$PARENT_DIR"
+  fi
+  SYMBOL_DIR="$PARENT_DIR/lib.unstripped"
+  if [[ -z "$(ls "$SYMBOL_DIR"/lib*.so 2>/dev/null)" ]]; then
+    SYMBOL_DIR="$PARENT_DIR/lib"
+    if [[ -z "$(ls "$SYMBOL_DIR"/lib*.so 2>/dev/null)" ]]; then
+      panic "Could not find any symbols under \
+$PARENT_DIR/lib{.unstripped}. Please build the program first!"
+    fi
+  fi
+  log "Auto-config: --symbol-dir=$SYMBOL_DIR"
+}
+
+if [ -z "$SYMBOL_DIR" ]; then
+  detect_symbol_dir
+elif [[ -z "$(ls "$SYMBOL_DIR"/lib*.so 2>/dev/null)" ]]; then
+  panic "Could not find any symbols under $SYMBOL_DIR"
+fi
+
+if [ -z "$NDK_DIR" ]; then
+  ANDROID_NDK_ROOT=$(PYTHONPATH=$CHROMIUM_SRC/build/android python -c \
+'from pylib.constants import ANDROID_NDK_ROOT; print ANDROID_NDK_ROOT,')
+else
+  if [ ! -d "$NDK_DIR" ]; then
+    panic "Invalid directory: $NDK_DIR"
+  fi
+  if [ ! -f "$NDK_DIR/ndk-build" ]; then
+    panic "Not a valid NDK directory: $NDK_DIR"
+  fi
+  ANDROID_NDK_ROOT=$NDK_DIR
+fi
+
+if [ "$GDBINIT" -a ! -f "$GDBINIT" ]; then
+  panic "Unknown --script file: $GDBINIT"
+fi
+
+# Check that ADB is in our path
+if [ -z "$ADB" ]; then
+  ADB=$(which adb 2>/dev/null)
+  if [ -z "$ADB" ]; then
+    panic "Can't find 'adb' tool in your path. Install it or use \
+--adb=<file>"
+  fi
+  log "Auto-config: --adb=$ADB"
+fi
+
+# Check that it works minimally
+ADB_VERSION=$($ADB version 2>/dev/null)
+echo "$ADB_VERSION" | fgrep -q -e "Android Debug Bridge"
+if [ $? != 0 ]; then
+  panic "Your 'adb' tool seems invalid, use --adb=<file> to specify a \
+different one: $ADB"
+fi
+
+# If there are more than one device connected, and ANDROID_SERIAL is not
+# defined, print an error message.
+NUM_DEVICES_PLUS2=$($ADB devices 2>/dev/null | wc -l)
+if [ "$NUM_DEVICES_PLUS2" -gt 3 -a -z "$ANDROID_SERIAL" ]; then
+  echo "ERROR: There is more than one Android device connected to ADB."
+  echo "Please define ANDROID_SERIAL to specify which one to use."
+  exit 1
+fi
+
+# Run a command through adb shell, strip the extra \r from the output
+# and return the correct status code to detect failures. This assumes
+# that the adb shell command prints a final \n to stdout.
+# $1+: command to run
+# Out: command's stdout
+# Return: command's status
+# Note: the command's stderr is lost
+adb_shell () {
+  local TMPOUT="$(mktemp)"
+  local LASTLINE RET
+  local ADB=${ADB:-adb}
+
+  # The weird sed rule strips the final \r on each output line.
+  # Since 'adb shell' never returns the command's proper exit/status code,
+  # we force it to be printed as '%%<status>' in the temporary output file,
+  # and later strip it from the output.
+  $ADB shell $@ ";" echo "%%\$?" 2>/dev/null | \
+      sed -e 's![[:cntrl:]]!!g' > $TMPOUT
+  # Get last line in log, which contains the exit code from the command
+  LASTLINE=$(sed -e '$!d' $TMPOUT)
+  # Extract the status code from the end of the line, which must
+  # be '%%<code>'.
+  RET=$(echo "$LASTLINE" | \
+    awk '{ if (match($0, "%%[0-9]+$")) { print substr($0,RSTART+2); } }')
+  # Remove the status code from the last line. Note that this may result
+  # in an empty line.
+  LASTLINE=$(echo "$LASTLINE" | \
+    awk '{ if (match($0, "%%[0-9]+$")) { print substr($0,1,RSTART-1); } }')
+  # The output itself: all lines except the status code.
+  sed -e '$d' $TMPOUT && printf "%s" "$LASTLINE"
+  # Remove temp file.
+  rm -f $TMPOUT
+  # Exit with the appropriate status.
+  return $RET
+}
+
+# Find the target architecture from a local shared library.
+# This returns an NDK-compatible architecture name.
+# out: NDK Architecture name, or empty string.
+get_gyp_target_arch () {
+  local RANDOM_LIB=$(ls "$SYMBOL_DIR"/lib*.so | head -n1)
+  local SO_DESC=$(file "$RANDOM_LIB")
+  case $SO_DESC in
+    *32-bit*ARM,*) echo "arm";;
+    *64-bit*ARM,*) echo "arm64";;
+    *32-bit*Intel,*) echo "x86";;
+    *x86-64,*) echo "x86_64";;
+    *32-bit*MIPS,*) echo "mips";;
+    *) echo "";;
+  esac
+}
+
+if [ -z "$TARGET_ARCH" ]; then
+  TARGET_ARCH=$(get_gyp_target_arch)
+  if [ -z "$TARGET_ARCH" ]; then
+    TARGET_ARCH=arm
+  fi
+else
+  # Nit: accept Chromium's 'ia32' as a valid target architecture. This
+  # script prefers the NDK 'x86' name instead because it uses that name
+  # to find NDK-specific files (e.g. the host gdb).
+  if [ "$TARGET_ARCH" = "ia32" ]; then
+    TARGET_ARCH=x86
+    log "Auto-config: --arch=$TARGET_ARCH  (equivalent to ia32)"
+  fi
+fi
+
+# Detect the NDK system name, i.e. the name used to identify the host.
+# out: NDK system name (e.g. 'linux' or 'darwin')
+get_ndk_host_system () {
+  local HOST_OS
+  if [ -z "$NDK_HOST_SYSTEM" ]; then
+    HOST_OS=$(uname -s)
+    case $HOST_OS in
+      Linux) NDK_HOST_SYSTEM=linux;;
+      Darwin) NDK_HOST_SYSTEM=darwin;;
+      *) panic "You can't run this script on this system: $HOST_OS";;
+    esac
+  fi
+  echo "$NDK_HOST_SYSTEM"
+}
+
+# Detect the NDK host architecture name.
+# out: NDK arch name (e.g. 'x86' or 'x86_64')
+get_ndk_host_arch () {
+  local HOST_ARCH HOST_OS
+  if [ -z "$NDK_HOST_ARCH" ]; then
+    HOST_OS=$(get_ndk_host_system)
+    HOST_ARCH=$(uname -p)
+    case $HOST_ARCH in
+      i?86) NDK_HOST_ARCH=x86;;
+      x86_64|amd64) NDK_HOST_ARCH=x86_64;;
+      *) panic "You can't run this script on this host architecture: $HOST_ARCH";;
+    esac
+    # Darwin trick: "uname -p" always returns i386 on 64-bit installations.
+    if [ "$HOST_OS" = darwin -a "$NDK_HOST_ARCH" = "x86" ]; then
+      # Use '/usr/bin/file', not just 'file' to avoid buggy MacPorts
+      # implementations of the tool. See http://b.android.com/53769
+      HOST_64BITS=$(/usr/bin/file -L "$SHELL" | grep -e "x86[_-]64")
+      if [ "$HOST_64BITS" ]; then
+        NDK_HOST_ARCH=x86_64
+      fi
+    fi
+  fi
+  echo "$NDK_HOST_ARCH"
+}
+
+# Convert an NDK architecture name into a GNU configure triplet.
+# $1: NDK architecture name (e.g. 'arm')
+# Out: Android GNU configure triplet (e.g. 'arm-linux-androideabi')
+get_arch_gnu_config () {
+  case $1 in
+    arm)
+      echo "arm-linux-androideabi"
+      ;;
+    arm64)
+      echo "aarch64-linux-android"
+      ;;
+    x86)
+      echo "i686-linux-android"
+      ;;
+    x86_64)
+      echo "x86_64-linux-android"
+      ;;
+    mips)
+      echo "mipsel-linux-android"
+      ;;
+    *)
+      echo "$ARCH-linux-android"
+      ;;
+  esac
+}
+
+# Convert an NDK architecture name into a toolchain name prefix
+# $1: NDK architecture name (e.g. 'arm')
+# Out: NDK toolchain name prefix (e.g. 'arm-linux-androideabi')
+get_arch_toolchain_prefix () {
+  # Return the configure triplet, except for x86!
+  if [ "$1" = "x86" ]; then
+    echo "$1"
+  else
+    get_arch_gnu_config $1
+  fi
+}
+
+# Find a NDK toolchain prebuilt file or sub-directory.
+# This will probe the various arch-specific toolchain directories
+# in the NDK for the needed file.
+# $1: NDK install path
+# $2: NDK architecture name
+# $3: prebuilt sub-path to look for.
+# Out: file path, or empty if none is found.
+get_ndk_toolchain_prebuilt () {
+  local NDK_DIR="${1%/}"
+  local ARCH="$2"
+  local SUBPATH="$3"
+  local NAME="$(get_arch_toolchain_prefix $ARCH)"
+  local FILE TARGET
+  FILE=$NDK_DIR/toolchains/$NAME-4.9/prebuilt/$SUBPATH
+  if [ ! -f "$FILE" ]; then
+    FILE=$NDK_DIR/toolchains/$NAME-4.8/prebuilt/$SUBPATH
+    if [ ! -f "$FILE" ]; then
+      FILE=
+    fi
+  fi
+  echo "$FILE"
+}
+
+# Find the path to an NDK's toolchain full prefix for a given architecture
+# $1: NDK install path
+# $2: NDK target architecture name
+# Out: install path + binary prefix (e.g.
+#      ".../path/to/bin/arm-linux-androideabi-")
+get_ndk_toolchain_fullprefix () {
+  local NDK_DIR="$1"
+  local ARCH="$2"
+  local TARGET NAME HOST_OS HOST_ARCH GCC CONFIG
+
+  # NOTE: This will need to be updated if the NDK changes the names or moves
+  #        the location of its prebuilt toolchains.
+  #
+  GCC=
+  HOST_OS=$(get_ndk_host_system)
+  HOST_ARCH=$(get_ndk_host_arch)
+  CONFIG=$(get_arch_gnu_config $ARCH)
+  GCC=$(get_ndk_toolchain_prebuilt \
+        "$NDK_DIR" "$ARCH" "$HOST_OS-$HOST_ARCH/bin/$CONFIG-gcc")
+  if [ -z "$GCC" -a "$HOST_ARCH" = "x86_64" ]; then
+    GCC=$(get_ndk_toolchain_prebuilt \
+          "$NDK_DIR" "$ARCH" "$HOST_OS-x86/bin/$CONFIG-gcc")
+  fi
+  if [ ! -f "$GCC" -a "$ARCH" = "x86" ]; then
+    # Special case, the x86 toolchain used to be incorrectly
+    # named i686-android-linux-gcc!
+    GCC=$(get_ndk_toolchain_prebuilt \
+          "$NDK_DIR" "$ARCH" "$HOST_OS-x86/bin/i686-android-linux-gcc")
+  fi
+  if [ -z "$GCC" ]; then
+    panic "Cannot find Android NDK toolchain for '$ARCH' architecture. \
+Please verify your NDK installation!"
+  fi
+  echo "${GCC%%gcc}"
+}
+
+# $1: NDK install path
+# $2: target architecture.
+get_ndk_gdbserver () {
+  local NDK_DIR="$1"
+  local ARCH=$2
+  local BINARY
+
+  # The location has moved after NDK r8
+  BINARY=$NDK_DIR/prebuilt/android-$ARCH/gdbserver/gdbserver
+  if [ ! -f "$BINARY" ]; then
+    BINARY=$(get_ndk_toolchain_prebuilt "$NDK_DIR" "$ARCH" gdbserver)
+  fi
+  echo "$BINARY"
+}
+
+# Check/probe the path to the Android toolchain installation. Always
+# use the NDK versions of gdb and gdbserver. They must match to avoid
+# issues when both binaries do not speak the same wire protocol.
+#
+if [ -z "$TOOLCHAIN" ]; then
+  ANDROID_TOOLCHAIN=$(get_ndk_toolchain_fullprefix \
+                      "$ANDROID_NDK_ROOT" "$TARGET_ARCH")
+  ANDROID_TOOLCHAIN=$(dirname "$ANDROID_TOOLCHAIN")
+  log "Auto-config: --toolchain=$ANDROID_TOOLCHAIN"
+else
+  # Be flexible, allow one to specify either the install path or the bin
+  # sub-directory in --toolchain:
+  #
+  if [ -d "$TOOLCHAIN/bin" ]; then
+    TOOLCHAIN=$TOOLCHAIN/bin
+  fi
+  ANDROID_TOOLCHAIN=$TOOLCHAIN
+fi
+
+# Cosmetic: Remove trailing directory separator.
+ANDROID_TOOLCHAIN=${ANDROID_TOOLCHAIN%/}
+
+# Find host GDB client binary
+if [ -z "$GDB" ]; then
+  GDB=$(which $ANDROID_TOOLCHAIN/*-$GDBEXEPOSTFIX 2>/dev/null | head -1)
+  if [ -z "$GDB" ]; then
+    panic "Can't find Android gdb client in your path, check your \
+--toolchain or --gdb path."
+  fi
+  log "Host gdb client: $GDB"
+fi
+
+# Find the gdbserver binary; we will later push it to /data/local/tmp.
+# This ensures that both gdbserver and $GDB talk the same binary protocol;
+# otherwise weird problems will appear.
+#
+if [ -z "$GDBSERVER" ]; then
+  GDBSERVER=$(get_ndk_gdbserver "$ANDROID_NDK_ROOT" "$TARGET_ARCH")
+  if [ -z "$GDBSERVER" ]; then
+    panic "Can't find NDK gdbserver binary. use --gdbserver to specify \
+valid one!"
+  fi
+  log "Auto-config: --gdbserver=$GDBSERVER"
+fi
+
+# A unique ID for this script's session. This needs to be the same in all
+# sub-shell commands we're going to launch, so take the PID of the launcher
+# process.
+TMP_ID=$$
+
+# Temporary directory, will get cleaned up on exit.
+TMPDIR=/tmp/$USER-adb-gdb-tmp-$TMP_ID
+mkdir -p "$TMPDIR" && rm -rf "$TMPDIR"/*
+
+GDBSERVER_PIDFILE="$TMPDIR"/gdbserver-$TMP_ID.pid
+
+# If --force is specified, try to kill any gdbserver process started by the
+# same user on the device. Normally, these are killed automatically by the
+# script on exit, but there are a few corner cases where this would still
+# be needed.
+if [ "$FORCE" ]; then
+  GDBSERVER_PIDS=$(adb_shell ps | awk '$9 ~ /gdbserver/ { print $2; }')
+  for GDB_PID in $GDBSERVER_PIDS; do
+    log "Killing previous gdbserver (PID=$GDB_PID)"
+    adb_shell kill -9 $GDB_PID
+  done
+fi
+
+if [ "$START" ]; then
+  log "Starting $PROGRAM_NAME on device."
+  adb_shell am start -n $PACKAGE_NAME/$ACTIVITY ${START_URL:+-d "$START_URL"}
+  adb_shell ps | grep -q $PACKAGE_NAME
+  fail_panic "Could not start $PROGRAM_NAME on device. Are you sure the \
+package is installed?"
+fi
+
+# Return the timestamp of a given file, as number of seconds since epoch.
+# $1: file path
+# Out: file timestamp
+get_file_timestamp () {
+  stat -c %Y "$1" 2>/dev/null
+}
+
+# Allow several concurrent debugging sessions
+TARGET_GDBSERVER=/data/data/$PACKAGE_NAME/gdbserver-adb-gdb-$TMP_ID
+TMP_TARGET_GDBSERVER=/data/local/tmp/gdbserver-adb-gdb-$TMP_ID
+
+# Return the build fingerprint contained in a build.prop file.
+# $1: path to build.prop file
+get_build_fingerprint_from () {
+  cat "$1" | grep -e '^ro.build.fingerprint=' | cut -d= -f2
+}
+
+
+ORG_PULL_LIBS_DIR=$PULL_LIBS_DIR
+PULL_LIBS_DIR=${PULL_LIBS_DIR:-$DEFAULT_PULL_LIBS_DIR}
+
+HOST_FINGERPRINT=
+DEVICE_FINGERPRINT=$(adb_shell getprop ro.build.fingerprint)
+[[ "$DEVICE_FINGERPRINT" ]] || panic "Failed to get the device fingerprint"
+log "Device build fingerprint: $DEVICE_FINGERPRINT"
+
+# If --pull-libs-dir is not specified, and this is a platform build, look
+# if we can use the symbolic libraries under $ANDROID_PRODUCT_OUT/symbols/
+# directly, if the build fingerprint matches the device.
+if [ -z "$ORG_PULL_LIBS_DIR" -a \
+     "$ANDROID_PRODUCT_OUT" -a \
+     -f "$ANDROID_PRODUCT_OUT/system/build.prop" ]; then
+  ANDROID_FINGERPRINT=$(get_build_fingerprint_from \
+                        "$ANDROID_PRODUCT_OUT"/system/build.prop)
+  log "Android build fingerprint:  $ANDROID_FINGERPRINT"
+  if [ "$ANDROID_FINGERPRINT" = "$DEVICE_FINGERPRINT" ]; then
+    log "Perfect match!"
+    PULL_LIBS_DIR=$ANDROID_PRODUCT_OUT/symbols
+    HOST_FINGERPRINT=$ANDROID_FINGERPRINT
+    if [ "$PULL_LIBS" ]; then
+      log "Ignoring --pull-libs since the device and platform build \
+fingerprints match."
+      NO_PULL_LIBS=true
+    fi
+  fi
+fi
+
+# If neither --pull-libs nor --no-pull-libs was specified, check the build
+# fingerprints of the device, and the cached system libraries on the host.
+#
+if [ -z "$NO_PULL_LIBS" -a -z "$PULL_LIBS" ]; then
+  if [ ! -f "$PULL_LIBS_DIR/build.fingerprint" ]; then
+    log "Auto-config: --pull-libs  (no cached libraries)"
+    PULL_LIBS=true
+  else
+    HOST_FINGERPRINT=$(< "$PULL_LIBS_DIR/build.fingerprint")
+    log "Host build fingerprint:   $HOST_FINGERPRINT"
+    if [ "$HOST_FINGERPRINT" == "$DEVICE_FINGERPRINT" ]; then
+      log "Auto-config: --no-pull-libs (fingerprint match)"
+      NO_PULL_LIBS=true
+    else
+      log "Auto-config: --pull-libs  (fingerprint mismatch)"
+      PULL_LIBS=true
+    fi
+  fi
+fi
+
+# Extract the system libraries from the device if necessary.
+if [ "$PULL_LIBS" -a -z "$NO_PULL_LIBS" ]; then
+  echo "Extracting system libraries into: $PULL_LIBS_DIR"
+fi
+
+mkdir -p "$PULL_LIBS_DIR"
+fail_panic "Can't create --libs-dir directory: $PULL_LIBS_DIR"
+
+# If requested, work with Emacs' M-x gdb.  The gdb indirections make it
+# difficult to pass --annotate=3 to the gdb binary itself.
+GDB_ARGS=
+if [ "$ANNOTATE" ]; then
+  GDB_ARGS=$GDB_ARGS" --annotate=$ANNOTATE"
+fi
+
+# Get the PID from the first argument or else find the PID of the
+# browser process.
+if [ -z "$PID" ]; then
+  PROCESSNAME=$PACKAGE_NAME
+  if [ "$SANDBOXED_INDEX" ]; then
+    PROCESSNAME=$PROCESSNAME:sandboxed_process$SANDBOXED_INDEX
+  elif [ "$SANDBOXED" ]; then
+    PROCESSNAME=$PROCESSNAME:sandboxed_process
+    PID=$(adb_shell ps | \
+          awk '$9 ~ /^'$PROCESSNAME'/ { print $2; }' | head -1)
+  elif [ "$PRIVILEGED_INDEX" ]; then
+    PROCESSNAME=$PROCESSNAME:privileged_process$PRIVILEGED_INDEX
+  elif [ "$PRIVILEGED" ]; then
+    PROCESSNAME=$PROCESSNAME:privileged_process
+    PID=$(adb_shell ps | \
+          awk '$9 ~ /^'$PROCESSNAME'/ { print $2; }' | head -1)
+  fi
+  if [ -z "$PID" ]; then
+    PID=$(adb_shell ps | \
+          awk '$9 == "'$PROCESSNAME'" { print $2; }' | head -1)
+  fi
+  if [ -z "$PID" ]; then
+    if [ "$START" ]; then
+      panic "Can't find application process PID, did it crash?"
+    else
+      panic "Can't find application process PID, are you sure it is \
+running? Try using --start."
+    fi
+  fi
+  log "Found process PID: $PID"
+elif [ "$SANDBOXED" ]; then
+  echo "WARNING: --sandboxed option ignored due to use of --pid."
+elif [ "$PRIVILEGED" ]; then
+  echo "WARNING: --privileged option ignored due to use of --pid."
+fi
+
+# Determine if 'adb shell' runs as root or not.
+# If so, we can launch gdbserver directly, otherwise, we have to
+# use run-as $PACKAGE_NAME ..., which requires the package to be debuggable.
+#
+if [ "$SU_PREFIX" ]; then
+  # Need to check that this works properly.
+  SU_PREFIX_TEST_LOG=$TMPDIR/su-prefix.log
+  adb_shell $SU_PREFIX \"echo "foo"\" > $SU_PREFIX_TEST_LOG 2>&1
+  if [ $? != 0 -o "$(cat $SU_PREFIX_TEST_LOG)" != "foo" ]; then
+    echo "ERROR: Cannot use '$SU_PREFIX' as a valid su prefix:"
+    echo "$ adb shell $SU_PREFIX \"echo foo\""
+    cat $SU_PREFIX_TEST_LOG
+    exit 1
+  fi
+  COMMAND_PREFIX="$SU_PREFIX \""
+  COMMAND_SUFFIX="\""
+else
+  SHELL_UID=$(adb shell cat /proc/self/status | \
+              awk '$1 == "Uid:" { print $2; }')
+  log "Shell UID: $SHELL_UID"
+  if [ "$SHELL_UID" != 0 -o -n "$NO_ROOT" ]; then
+    COMMAND_PREFIX="run-as $PACKAGE_NAME"
+    COMMAND_SUFFIX=
+  else
+    COMMAND_PREFIX=
+    COMMAND_SUFFIX=
+  fi
+fi
+log "Command prefix: '$COMMAND_PREFIX'"
+log "Command suffix: '$COMMAND_SUFFIX'"
+
+# Pull the device's system libraries that are mapped by our process.
+# Pulling all system libraries takes too long, so determine which ones
+# we need by looking at /proc/$PID/maps instead.
+if [ "$PULL_LIBS" -a -z "$NO_PULL_LIBS" ]; then
+  echo "Extracting system libraries into: $PULL_LIBS_DIR"
+  MAPPINGS=$(adb_shell $COMMAND_PREFIX cat /proc/$PID/maps $COMMAND_SUFFIX)
+  if [ $? != 0 ]; then
+    echo "ERROR: Could not list process's memory mappings."
+    if [ "$SU_PREFIX" ]; then
+      panic "Are you sure your --su-prefix is correct?"
+    else
+      panic "Use --su-prefix if the application is not debuggable."
+    fi
+  fi
+  # Remove the fingerprint file in case pulling one of the libs fails.
+  rm -f "$PULL_LIBS_DIR/build.fingerprint"
+  SYSTEM_LIBS=$(echo "$MAPPINGS" | \
+      awk '$6 ~ /\/system\/.*\.so$/ { print $6; }' | sort -u)
+  for SYSLIB in /system/bin/linker $SYSTEM_LIBS; do
+    echo "Pulling from device: $SYSLIB"
+    DST_FILE=$PULL_LIBS_DIR$SYSLIB
+    DST_DIR=$(dirname "$DST_FILE")
+    mkdir -p "$DST_DIR" && adb pull $SYSLIB "$DST_FILE" 2>/dev/null
+    fail_panic "Could not pull $SYSLIB from device !?"
+  done
+  echo "Writing the device fingerprint"
+  echo "$DEVICE_FINGERPRINT" > "$PULL_LIBS_DIR/build.fingerprint"
+fi
+
+# Find all the sub-directories of $PULL_LIBS_DIR, up to depth 4
+# so we can add them to solib-search-path later.
+SOLIB_DIRS=$(find $PULL_LIBS_DIR -mindepth 1 -maxdepth 4 -type d | \
+             grep -v "^$" | tr '\n' ':')
+
+# This is a re-implementation of gdbclient, where we use compatible
+# versions of gdbserver and $GDB to ensure that everything works
+# properly.
+#
+
+# Push gdbserver to the device
+log "Pushing gdbserver $GDBSERVER to $TARGET_GDBSERVER"
+adb push $GDBSERVER $TMP_TARGET_GDBSERVER &>/dev/null
+adb shell $COMMAND_PREFIX cp $TMP_TARGET_GDBSERVER $TARGET_GDBSERVER
+adb shell rm $TMP_TARGET_GDBSERVER
+fail_panic "Could not copy gdbserver to the device!"
+
+if [ -z "$PORT" ]; then
+    PORT=5039
+fi
+HOST_PORT=$PORT
+TARGET_PORT=$PORT
+
+# Select correct app_process for architecture.
+case $TARGET_ARCH in
+      arm|x86|mips) GDBEXEC=app_process32;;
+      arm64|x86_64) GDBEXEC=app_process64;;
+      *) fail_panic "Unknown app_process for architecture!";;
+esac
+
+# Default to app_process if bit-width specific process isn't found.
+adb_shell ls /system/bin/$GDBEXEC
+if [ $? != 0 ]; then
+    GDBEXEC=app_process
+fi
+
+# Detect AddressSanitizer setup on the device. In that case app_process is a
+# script, and the real executable is app_process.real.
+GDBEXEC_ASAN=app_process.real
+adb_shell ls /system/bin/$GDBEXEC_ASAN
+if [ $? == 0 ]; then
+    GDBEXEC=$GDBEXEC_ASAN
+fi
+
+# Pull the app_process binary from the device.
+log "Pulling $GDBEXEC from device"
+adb pull /system/bin/$GDBEXEC "$TMPDIR"/$GDBEXEC &>/dev/null
+fail_panic "Could not retrieve $GDBEXEC from the device!"
+
+# Set up network redirection
+log "Setting network redirection (host:$HOST_PORT -> device:$TARGET_PORT)"
+adb forward tcp:$HOST_PORT tcp:$TARGET_PORT
+fail_panic "Could not setup network redirection from \
+host:localhost:$HOST_PORT to device:localhost:$TARGET_PORT!"
+
+# Start gdbserver in the background
+# Note that using run-as requires the package to be debuggable.
+#
+# If not, this will fail horribly. The alternative is to run the
+# program as root, which of course requires root privileges.
+# Maybe we should add a --root option to enable this?
+#
+log "Starting gdbserver in the background:"
+GDBSERVER_LOG=$TMPDIR/gdbserver-$TMP_ID.log
+log "adb shell $COMMAND_PREFIX $TARGET_GDBSERVER :$TARGET_PORT \
+  --attach $PID $COMMAND_SUFFIX"
+"$ADB" shell $COMMAND_PREFIX $TARGET_GDBSERVER :$TARGET_PORT \
+  --attach $PID $COMMAND_SUFFIX > $GDBSERVER_LOG 2>&1 &
+GDBSERVER_PID=$!
+echo "$GDBSERVER_PID" > $GDBSERVER_PIDFILE
+log "background job pid: $GDBSERVER_PID"
+
+# Sleep to allow gdbserver to attach to the remote process and be
+# ready to connect to.
+log "Sleeping ${ATTACH_DELAY}s to allow gdbserver to attach."
+sleep "$ATTACH_DELAY"
+log "Job control: $(jobs -l)"
+STATE=$(jobs -l | awk '$2 == "'$GDBSERVER_PID'" { print $3; }')
+if [ "$STATE" != "Running" ]; then
+  echo "ERROR: GDBServer either failed to run or attach to PID $PID!"
+  if [ "$(adb_shell su -c getenforce)" != "Permissive" ]; then
+    echo "Device mode is Enforcing. Changing device mode to Permissive."
+    adb_shell su -c setenforce 0
+    if [ "$(adb_shell su -c getenforce)" != "Permissive" ]; then
+      echo "ERROR: Failed to change device mode to Permissive"
+      echo "Failure log (use --verbose for more information):"
+      cat $GDBSERVER_LOG
+      exit 1
+    fi
+  else
+    echo "Failure log (use --verbose for more information):"
+    cat $GDBSERVER_LOG
+    exit 1
+  fi
+fi
+
+# Generate a file containing useful GDB initialization commands
+readonly COMMANDS=$TMPDIR/gdb.init
+log "Generating GDB initialization commands file: $COMMANDS"
+echo -n "" > $COMMANDS
+echo "set print pretty 1" >> $COMMANDS
+echo "python" >> $COMMANDS
+echo "import sys" >> $COMMANDS
+echo "sys.path.insert(0, '$CHROMIUM_SRC/tools/gdb/')" >> $COMMANDS
+echo "try:" >> $COMMANDS
+echo "  import gdb_chrome" >> $COMMANDS
+echo "finally:" >> $COMMANDS
+echo "  sys.path.pop(0)" >> $COMMANDS
+echo "end" >> $COMMANDS
+echo "file $TMPDIR/$GDBEXEC" >> $COMMANDS
+echo "directory $CHROMIUM_SRC" >> $COMMANDS
+echo "set solib-absolute-prefix $PULL_LIBS_DIR" >> $COMMANDS
+echo "set solib-search-path $SOLIB_DIRS:$PULL_LIBS_DIR:$SYMBOL_DIR" \
+    >> $COMMANDS
+echo "echo Attaching and reading symbols, this may take a while.." \
+    >> $COMMANDS
+echo "target remote :$HOST_PORT" >> $COMMANDS
+
+if [ "$GDBINIT" ]; then
+  cat "$GDBINIT" >> $COMMANDS
+fi
+
+if [ "$VERBOSE" -gt 0 ]; then
+  echo "### START $COMMANDS"
+  cat $COMMANDS
+  echo "### END $COMMANDS"
+fi
+
+log "Launching gdb client: $GDB $GDB_ARGS -x $COMMANDS"
+$GDB $GDB_ARGS -x $COMMANDS &&
+rm -f "$GDBSERVER_PIDFILE"
diff --git a/build/android/adb_gdb_android_webview_shell b/build/android/adb_gdb_android_webview_shell
new file mode 100755
index 0000000..f685fda
--- /dev/null
+++ b/build/android/adb_gdb_android_webview_shell
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Attach to or start an AwShellApplication process and debug it.
+# See --help for details.
+#
+PROGDIR=$(dirname "$0")
+export ADB_GDB_PROGNAME=$(basename "$0")
+export ADB_GDB_ACTIVITY=.AwShellActivity
+"$PROGDIR"/adb_gdb \
+    --program-name=AwShellApplication \
+    --package-name=org.chromium.android_webview.shell \
+    "$@"
diff --git a/build/android/adb_gdb_blimp_client b/build/android/adb_gdb_blimp_client
new file mode 100755
index 0000000..3c2e21d
--- /dev/null
+++ b/build/android/adb_gdb_blimp_client
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Attach to or start a Blimp process and debug it.
+# See --help for details.
+#
+PROGDIR=$(dirname "$0")
+export ADB_GDB_PROGNAME=$(basename "$0")
+export ADB_GDB_ACTIVITY=org.chromium.blimp.BlimpRendererActivity
+"$PROGDIR"/adb_gdb \
+    --program-name=Blimp \
+    --package-name=org.chromium.blimp \
+    "$@"
diff --git a/build/android/adb_gdb_chrome_public b/build/android/adb_gdb_chrome_public
new file mode 100755
index 0000000..4366c83
--- /dev/null
+++ b/build/android/adb_gdb_chrome_public
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Attach to or start a ChromePublic process and debug it.
+# See --help for details.
+#
+PROGDIR=$(dirname "$0")
+export ADB_GDB_PROGNAME=$(basename "$0")
+export ADB_GDB_ACTIVITY=com.google.android.apps.chrome.Main
+"$PROGDIR"/adb_gdb \
+    --program-name=ChromePublic \
+    --package-name=org.chromium.chrome \
+    "$@"
diff --git a/build/android/adb_gdb_content_shell b/build/android/adb_gdb_content_shell
new file mode 100755
index 0000000..18e1a61
--- /dev/null
+++ b/build/android/adb_gdb_content_shell
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Attach to or start a ContentShell process and debug it.
+# See --help for details.
+#
+PROGDIR=$(dirname "$0")
+export ADB_GDB_PROGNAME=$(basename "$0")
+export ADB_GDB_ACTIVITY=.ContentShellActivity
+"$PROGDIR"/adb_gdb \
+    --program-name=ContentShell \
+    --package-name=org.chromium.content_shell_apk \
+    "$@"
diff --git a/build/android/adb_gdb_cronet_sample b/build/android/adb_gdb_cronet_sample
new file mode 100755
index 0000000..8d0c864
--- /dev/null
+++ b/build/android/adb_gdb_cronet_sample
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Attach to or start a CronetSample process and debug it.
+# See --help for details.
+#
+PROGDIR=$(dirname "$0")
+export ADB_GDB_PROGNAME=$(basename "$0")
+export ADB_GDB_ACTIVITY=.CronetSampleActivity
+"$PROGDIR"/adb_gdb \
+    --program-name=CronetSample \
+    --package-name=org.chromium.cronet_sample_apk \
+    "$@"
diff --git a/build/android/adb_gdb_mojo_shell b/build/android/adb_gdb_mojo_shell
new file mode 100755
index 0000000..ba91149
--- /dev/null
+++ b/build/android/adb_gdb_mojo_shell
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Attach to or start a MojoShell process and debug it.
+# See --help for details.
+#
+PROGDIR=$(dirname "$0")
+export ADB_GDB_PROGNAME=$(basename "$0")
+export ADB_GDB_ACTIVITY=.MojoShellActivity
+"$PROGDIR"/adb_gdb \
+    --program-name=MojoShell \
+    --package-name=org.chromium.mojo_shell_apk \
+    "$@"
diff --git a/build/android/adb_install_apk.py b/build/android/adb_install_apk.py
new file mode 100755
index 0000000..bc55bf8
--- /dev/null
+++ b/build/android/adb_install_apk.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utility script to install APKs from the command line quickly."""
+
+import argparse
+import glob
+import logging
+import os
+import sys
+
+import devil_chromium
+from devil import devil_env
+from devil.android import apk_helper
+from devil.android import device_blacklist
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.utils import run_tests_helper
+from pylib import constants
+
+
+def main():
+  parser = argparse.ArgumentParser()
+
+  apk_group = parser.add_mutually_exclusive_group(required=True)
+  apk_group.add_argument('--apk', dest='apk_name',
+                         help='DEPRECATED The name of the apk containing the'
+                              ' application (with the .apk extension).')
+  apk_group.add_argument('apk_path', nargs='?',
+                         help='The path to the APK to install.')
+
+  # TODO(jbudorick): Remove once no clients pass --apk_package
+  parser.add_argument('--apk_package', help='DEPRECATED unused')
+  parser.add_argument('--split',
+                      action='append',
+                      dest='splits',
+                      help='A glob matching the apk splits. '
+                           'Can be specified multiple times.')
+  parser.add_argument('--keep_data',
+                      action='store_true',
+                      default=False,
+                      help='Keep the package data when installing '
+                           'the application.')
+  parser.add_argument('--debug', action='store_const', const='Debug',
+                      dest='build_type',
+                      default=os.environ.get('BUILDTYPE', 'Debug'),
+                      help='If set, run test suites under out/Debug. '
+                           'Default is env var BUILDTYPE or Debug')
+  parser.add_argument('--release', action='store_const', const='Release',
+                      dest='build_type',
+                      help='If set, run test suites under out/Release. '
+                           'Default is env var BUILDTYPE or Debug.')
+  parser.add_argument('-d', '--device', dest='devices', action='append',
+                      default=[],
+                      help='Target device for apk to install on. Enter multiple'
+                           ' times for multiple devices.')
+  parser.add_argument('--adb-path',
+                      help='Absolute path to the adb binary to use.')
+  parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
+  parser.add_argument('-v', '--verbose', action='count',
+                      help='Enable verbose logging.')
+  parser.add_argument('--downgrade', action='store_true',
+                      help='If set, allows downgrading of apk.')
+  parser.add_argument('--timeout', type=int,
+                      default=device_utils.DeviceUtils.INSTALL_DEFAULT_TIMEOUT,
+                      help='Seconds to wait for APK installation. '
+                           '(default: %(default)s)')
+
+  args = parser.parse_args()
+
+  run_tests_helper.SetLogLevel(args.verbose)
+  constants.SetBuildType(args.build_type)
+
+  devil_custom_deps = None
+  if args.adb_path:
+    devil_custom_deps = {
+      'adb': {
+        devil_env.GetPlatform(): [args.adb_path],
+      },
+    }
+
+  devil_chromium.Initialize(
+      output_directory=constants.GetOutDirectory(),
+      custom_deps=devil_custom_deps)
+
+  apk = args.apk_path or args.apk_name
+  if not apk.endswith('.apk'):
+    apk += '.apk'
+  if not os.path.exists(apk):
+    apk = os.path.join(constants.GetOutDirectory(), 'apks', apk)
+    if not os.path.exists(apk):
+      parser.error('%s not found.' % apk)
+
+  if args.splits:
+    splits = []
+    base_apk_package = apk_helper.ApkHelper(apk).GetPackageName()
+    for split_glob in args.splits:
+      apks = [f for f in glob.glob(split_glob) if f.endswith('.apk')]
+      if not apks:
+        logging.warning('No apks matched for %s.', split_glob)
+      for f in apks:
+        helper = apk_helper.ApkHelper(f)
+        if (helper.GetPackageName() == base_apk_package
+            and helper.GetSplitName()):
+          splits.append(f)
+
+  blacklist = (device_blacklist.Blacklist(args.blacklist_file)
+               if args.blacklist_file
+               else None)
+  devices = device_utils.DeviceUtils.HealthyDevices(blacklist=blacklist,
+                                                    device_arg=args.devices)
+
+  def blacklisting_install(device):
+    try:
+      if args.splits:
+        device.InstallSplitApk(apk, splits, reinstall=args.keep_data,
+                               allow_downgrade=args.downgrade)
+      else:
+        device.Install(apk, reinstall=args.keep_data,
+                       allow_downgrade=args.downgrade,
+                       timeout=args.timeout)
+    except device_errors.CommandFailedError:
+      logging.exception('Failed to install %s', apk)
+      if blacklist:
+        blacklist.Extend([str(device)], reason='install_failure')
+        logging.warning('Blacklisting %s', str(device))
+    except device_errors.CommandTimeoutError:
+      logging.exception('Timed out while installing %s', apk)
+      if blacklist:
+        blacklist.Extend([str(device)], reason='install_timeout')
+        logging.warning('Blacklisting %s', str(device))
+
+  device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_install)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
+
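As a usage note, the split-selection loop above keeps only those candidate
files that share the base APK's package name and actually carry a split name.
A minimal sketch of that step, using the same devil apk_helper calls as the
script (paths hypothetical):

    from devil.android import apk_helper

    base = apk_helper.ApkHelper('out/Release/apks/ChromePublic.apk')  # hypothetical
    candidates = ['out/Release/apks/ChromePublic-density.apk']        # from a --split glob
    splits = [f for f in candidates
              if apk_helper.ApkHelper(f).GetPackageName() == base.GetPackageName()
              and apk_helper.ApkHelper(f).GetSplitName()]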
diff --git a/build/android/adb_kill_android_webview_shell b/build/android/adb_kill_android_webview_shell
new file mode 100755
index 0000000..5f287f0
--- /dev/null
+++ b/build/android/adb_kill_android_webview_shell
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Kill a running android webview shell.
+#
+# Assumes you have sourced the build/android/envsetup.sh script.
+
+SHELL_PID_LINES=$(adb shell ps | grep ' org.chromium.android_webview.shell')
+VAL=$(echo "$SHELL_PID_LINES" | wc -l)
+if [ $VAL -lt 1 ] ; then
+   echo "Not running android webview shell."
+else
+   SHELL_PID=$(echo $SHELL_PID_LINES | awk '{print $2}')
+   if [ "$SHELL_PID" != "" ] ; then
+      set -x
+      adb shell kill $SHELL_PID
+      set -
+   else
+     echo "Android webview shell does not appear to be running."
+   fi
+fi
diff --git a/build/android/adb_kill_blimp_client b/build/android/adb_kill_blimp_client
new file mode 100755
index 0000000..6221e45
--- /dev/null
+++ b/build/android/adb_kill_blimp_client
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Kill a running instance of Blimp.
+#
+# Assumes you have sourced the build/android/envsetup.sh script.
+
+SHELL_PID_LINES=$(adb shell ps | grep -w 'org.chromium.blimp')
+VAL=$(echo "$SHELL_PID_LINES" | wc -l)
+if [ $VAL -lt 1 ] ; then
+   echo "Not running Blimp."
+else
+   SHELL_PID=$(echo $SHELL_PID_LINES | awk '{print $2}')
+   if [ "$SHELL_PID" != "" ] ; then
+      set -x
+      adb shell kill $SHELL_PID
+      set -
+   else
+     echo "Blimp does not appear to be running."
+   fi
+fi
diff --git a/build/android/adb_kill_chrome_public b/build/android/adb_kill_chrome_public
new file mode 100755
index 0000000..5b539a0
--- /dev/null
+++ b/build/android/adb_kill_chrome_public
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Kill a running instance of ChromePublic.
+#
+# Assumes you have sourced the build/android/envsetup.sh script.
+
+SHELL_PID_LINES=$(adb shell ps | grep -w 'org.chromium.chrome')
+VAL=$(echo "$SHELL_PID_LINES" | wc -l)
+if [ $VAL -lt 1 ] ; then
+   echo "Not running ChromePublic."
+else
+   SHELL_PID=$(echo $SHELL_PID_LINES | awk '{print $2}')
+   if [ "$SHELL_PID" != "" ] ; then
+      set -x
+      adb shell kill $SHELL_PID
+      set -
+   else
+     echo "ChromePublic does not appear to be running."
+   fi
+fi
diff --git a/build/android/adb_kill_content_shell b/build/android/adb_kill_content_shell
new file mode 100755
index 0000000..e379dd4
--- /dev/null
+++ b/build/android/adb_kill_content_shell
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Kill a running content shell.
+#
+# Assumes you have sourced the build/android/envsetup.sh script.
+
+SHELL_PID_LINES=$(adb shell ps | grep ' org.chromium.content_shell_apk')
+VAL=$(echo "$SHELL_PID_LINES" | wc -l)
+if [ $VAL -lt 1 ] ; then
+   echo "Not running Content shell."
+else
+   SHELL_PID=$(echo $SHELL_PID_LINES | awk '{print $2}')
+   if [ "$SHELL_PID" != "" ] ; then
+      set -x
+      adb shell kill $SHELL_PID
+      set -
+   else
+     echo "Content shell does not appear to be running."
+   fi
+fi
diff --git a/build/android/adb_logcat_monitor.py b/build/android/adb_logcat_monitor.py
new file mode 100755
index 0000000..d3cc67d
--- /dev/null
+++ b/build/android/adb_logcat_monitor.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Saves logcats from all connected devices.
+
+Usage: adb_logcat_monitor.py <base_dir> [<adb_binary_path>]
+
+This script will repeatedly poll adb for new devices and save logcats
+inside the <base_dir> directory, which it attempts to create.  The
+script will run until killed by an external signal.  To test, run the
+script in a shell and <Ctrl>-C it after a while.  It should be
+resilient across phone disconnects and reconnects and start the logcat
+early enough to not miss anything.
+"""
+
+import logging
+import os
+import re
+import shutil
+import signal
+import subprocess
+import sys
+import time
+
+# Map from device_id -> (process, logcat_num)
+devices = {}
+
+
+class TimeoutException(Exception):
+  """Exception used to signal a timeout."""
+  pass
+
+
+class SigtermError(Exception):
+  """Exception used to catch a sigterm."""
+  pass
+
+
+def StartLogcatIfNecessary(device_id, adb_cmd, base_dir):
+  """Spawns a adb logcat process if one is not currently running."""
+  process, logcat_num = devices[device_id]
+  if process:
+    if process.poll() is None:
+      # Logcat process is still happily running
+      return
+    else:
+      logging.info('Logcat for device %s has died', device_id)
+      error_filter = re.compile('- waiting for device -')
+      for line in process.stderr:
+        if not error_filter.match(line):
+          logging.error(device_id + ':   ' + line)
+
+  logging.info('Starting logcat %d for device %s', logcat_num,
+               device_id)
+  logcat_filename = 'logcat_%s_%03d' % (device_id, logcat_num)
+  logcat_file = open(os.path.join(base_dir, logcat_filename), 'w')
+  process = subprocess.Popen([adb_cmd, '-s', device_id,
+                              'logcat', '-v', 'threadtime'],
+                             stdout=logcat_file,
+                             stderr=subprocess.PIPE)
+  devices[device_id] = (process, logcat_num + 1)
+
+
+def GetAttachedDevices(adb_cmd):
+  """Gets the device list from adb.
+
+  We use an alarm in this function to avoid deadlocking from an external
+  dependency.
+
+  Args:
+    adb_cmd: binary to run adb
+
+  Returns:
+    list of devices or an empty list on timeout
+  """
+  signal.alarm(2)
+  try:
+    out, err = subprocess.Popen([adb_cmd, 'devices'],
+                                stdout=subprocess.PIPE,
+                                stderr=subprocess.PIPE).communicate()
+    if err:
+      logging.warning('adb device error %s', err.strip())
+    return re.findall('^(\\S+)\tdevice$', out, re.MULTILINE)
+  except TimeoutException:
+    logging.warning('"adb devices" command timed out')
+    return []
+  except (IOError, OSError):
+    logging.exception('Exception from "adb devices"')
+    return []
+  finally:
+    signal.alarm(0)
+
+
+def main(base_dir, adb_cmd='adb'):
+  """Monitor adb forever.  Expects a SIGINT (Ctrl-C) to kill."""
+  # We create the directory to ensure 'run once' semantics
+  if os.path.exists(base_dir):
+    print 'adb_logcat_monitor: %s already exists? Cleaning' % base_dir
+    shutil.rmtree(base_dir, ignore_errors=True)
+
+  os.makedirs(base_dir)
+  logging.basicConfig(filename=os.path.join(base_dir, 'eventlog'),
+                      level=logging.INFO,
+                      format='%(asctime)-2s %(levelname)-8s %(message)s')
+
+  # Set up the alarm for calling 'adb devices'. This is to ensure
+  # our script doesn't get stuck waiting for a process response
+  def TimeoutHandler(_signum, _unused_frame):
+    raise TimeoutException()
+  signal.signal(signal.SIGALRM, TimeoutHandler)
+
+  # Handle SIGTERMs to ensure clean shutdown
+  def SigtermHandler(_signum, _unused_frame):
+    raise SigtermError()
+  signal.signal(signal.SIGTERM, SigtermHandler)
+
+  logging.info('Started with pid %d', os.getpid())
+  pid_file_path = os.path.join(base_dir, 'LOGCAT_MONITOR_PID')
+
+  try:
+    with open(pid_file_path, 'w') as f:
+      f.write(str(os.getpid()))
+    while True:
+      for device_id in GetAttachedDevices(adb_cmd):
+        if device_id not in devices:
+          subprocess.call([adb_cmd, '-s', device_id, 'logcat', '-c'])
+          devices[device_id] = (None, 0)
+
+      for device in devices:
+        # This will spawn logcat watchers for any device ever detected
+        StartLogcatIfNecessary(device, adb_cmd, base_dir)
+
+      time.sleep(5)
+  except SigtermError:
+    logging.info('Received SIGTERM, shutting down')
+  except: # pylint: disable=bare-except
+    logging.exception('Unexpected exception in main.')
+  finally:
+    for process, _ in devices.itervalues():
+      if process:
+        try:
+          process.terminate()
+        except OSError:
+          pass
+    os.remove(pid_file_path)
+
+
+if __name__ == '__main__':
+  if 2 <= len(sys.argv) <= 3:
+    print 'adb_logcat_monitor: Initializing'
+    sys.exit(main(*sys.argv[1:3]))
+
+  print 'Usage: %s <base_dir> [<adb_binary_path>]' % sys.argv[0]
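The SIGALRM dance in GetAttachedDevices is a general pattern for putting a
bound on a blocking call; a minimal standalone sketch (the 2-second timeout is
arbitrary, and this is Unix-only):

    import signal
    import subprocess

    class TimeoutException(Exception):
      pass

    def _OnAlarm(_signum, _frame):
      raise TimeoutException()

    signal.signal(signal.SIGALRM, _OnAlarm)
    signal.alarm(2)  # Deliver SIGALRM after 2 seconds.
    try:
      out = subprocess.check_output(['adb', 'devices'])
    except TimeoutException:
      out = ''  # The call blocked for too long.
    finally:
      signal.alarm(0)  # Always cancel any pending alarm.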
diff --git a/build/android/adb_logcat_printer.py b/build/android/adb_logcat_printer.py
new file mode 100755
index 0000000..a715170
--- /dev/null
+++ b/build/android/adb_logcat_printer.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Shutdown adb_logcat_monitor and print accumulated logs.
+
+To test, call './adb_logcat_printer.py <base_dir>' where
+<base_dir> contains 'adb logcat -v threadtime' files named as
+logcat_<deviceID>_<sequenceNum>
+
+The script will print the files to stdout (or to --output-path, if given),
+and will combine multiple logcats from a single device if there is overlap.
+
+Additionally, if a <base_dir>/LOGCAT_MONITOR_PID exists, the script
+will attempt to terminate the contained PID by sending a SIGINT and
+monitoring for the deletion of the aforementioned file.
+"""
+# pylint: disable=W0702
+
+import cStringIO
+import logging
+import optparse
+import os
+import re
+import signal
+import sys
+import time
+
+
+# Set this to debug for more verbose output
+LOG_LEVEL = logging.INFO
+
+
+def CombineLogFiles(list_of_lists, logger):
+  """Splices together multiple logcats from the same device.
+
+  Args:
+    list_of_lists: list of pairs (filename, list of timestamped lines)
+    logger: handler to log events
+
+  Returns:
+    list of lines with duplicates removed
+  """
+  cur_device_log = ['']
+  for cur_file, cur_file_lines in list_of_lists:
+    # Ignore files with just the logcat header
+    if len(cur_file_lines) < 2:
+      continue
+    common_index = 0
+    # Skip this step if list just has empty string
+    if len(cur_device_log) > 1:
+      try:
+        line = cur_device_log[-1]
+        # Used to make sure we only splice on a timestamped line
+        if re.match(r'^\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} ', line):
+          common_index = cur_file_lines.index(line)
+        else:
+          logger.warning('splice error - no timestamp in "%s"?', line.strip())
+      except ValueError:
+        # The last line was valid but wasn't found in the next file
+        cur_device_log += ['***** POSSIBLE INCOMPLETE LOGCAT *****']
+        logger.info('Unable to splice %s. Incomplete logcat?', cur_file)
+
+    cur_device_log += ['*'*30 + '  %s' % cur_file]
+    cur_device_log.extend(cur_file_lines[common_index:])
+
+  return cur_device_log
+
+
+def FindLogFiles(base_dir):
+  """Search a directory for logcat files.
+
+  Args:
+    base_dir: directory to search
+
+  Returns:
+    Mapping of device_id to a sorted list of file paths for a given device
+  """
+  logcat_filter = re.compile(r'^logcat_(\S+)_(\d+)$')
+  # list of tuples (<device_id>, <seq num>, <full file path>)
+  filtered_list = []
+  for cur_file in os.listdir(base_dir):
+    matcher = logcat_filter.match(cur_file)
+    if matcher:
+      filtered_list += [(matcher.group(1), int(matcher.group(2)),
+                         os.path.join(base_dir, cur_file))]
+  filtered_list.sort()
+  file_map = {}
+  for device_id, _, cur_file in filtered_list:
+    if device_id not in file_map:
+      file_map[device_id] = []
+
+    file_map[device_id] += [cur_file]
+  return file_map
+
+
+def GetDeviceLogs(log_filenames, logger):
+  """Read log files, combine and format.
+
+  Args:
+    log_filenames: mapping of device_id to sorted list of file paths
+    logger: logger handle for logging events
+
+  Returns:
+    list of formatted device logs, one for each device.
+  """
+  device_logs = []
+
+  for device, device_files in log_filenames.iteritems():
+    logger.debug('%s: %s', device, str(device_files))
+    device_file_lines = []
+    for cur_file in device_files:
+      with open(cur_file) as f:
+        device_file_lines += [(cur_file, f.read().splitlines())]
+    combined_lines = CombineLogFiles(device_file_lines, logger)
+    # Prepend each line with a short unique ID so it's easy to see
+    # when the device changes.  We don't use the start of the device
+    # ID because it can be the same among devices.  Example lines:
+    # AB324:  foo
+    # AB324:  blah
+    device_logs += [('\n' + device[-5:] + ':  ').join(combined_lines)]
+  return device_logs
+
+
+def ShutdownLogcatMonitor(base_dir, logger):
+  """Attempts to shutdown adb_logcat_monitor and blocks while waiting."""
+  try:
+    monitor_pid_path = os.path.join(base_dir, 'LOGCAT_MONITOR_PID')
+    with open(monitor_pid_path) as f:
+      monitor_pid = int(f.readline())
+
+    logger.info('Sending SIGTERM to %d', monitor_pid)
+    os.kill(monitor_pid, signal.SIGTERM)
+    i = 0
+    while True:
+      time.sleep(.2)
+      if not os.path.exists(monitor_pid_path):
+        return
+      if not os.path.exists('/proc/%d' % monitor_pid):
+        logger.warning('Monitor (pid %d) terminated uncleanly?', monitor_pid)
+        return
+      logger.info('Waiting for logcat process to terminate.')
+      i += 1
+      if i >= 10:
+        logger.warning('Monitor pid did not terminate. Continuing anyway.')
+        return
+
+  except (ValueError, IOError, OSError):
+    logger.exception('Error signaling logcat monitor - continuing')
+
+
+def main(argv):
+  parser = optparse.OptionParser(usage='Usage: %prog [options] <log dir>')
+  parser.add_option('--output-path',
+                    help='Output file path (if unspecified, prints to stdout)')
+  options, args = parser.parse_args(argv)
+  if len(args) != 1:
+    parser.error('Wrong number of unparsed args')
+  base_dir = args[0]
+
+  log_stringio = cStringIO.StringIO()
+  logger = logging.getLogger('LogcatPrinter')
+  logger.setLevel(LOG_LEVEL)
+  sh = logging.StreamHandler(log_stringio)
+  sh.setFormatter(logging.Formatter('%(asctime)-2s %(levelname)-8s'
+                                    ' %(message)s'))
+  logger.addHandler(sh)
+
+  if options.output_path:
+    if not os.path.exists(os.path.dirname(options.output_path)):
+      logger.warning('Output dir %s doesn\'t exist. Creating it.',
+                      os.path.dirname(options.output_path))
+      os.makedirs(os.path.dirname(options.output_path))
+    output_file = open(options.output_path, 'w')
+    logger.info('Dumping logcat to local file %s. If running in a build, '
+                'this file will likely be uploaded to google storage '
+                'in a later step. It can be downloaded from there.',
+                options.output_path)
+  else:
+    output_file = sys.stdout
+
+  try:
+    # Wait at least 5 seconds after base_dir is created before printing.
+    #
+    # The idea is that 'adb logcat > file' output consists of 2 phases:
+    #  1 Dump all the saved logs to the file
+    #  2 Stream log messages as they are generated
+    #
+    # We want to give enough time for phase 1 to complete.  There's no
+    # good method to tell how long to wait, but it usually only takes a
+    # second.  On most bots, this code path won't occur at all, since
+    # the adb_logcat_monitor.py command will have been spawned more than
+    # 5 seconds before this script is called.
+    try:
+      sleep_time = 5 - (time.time() - os.path.getctime(base_dir))
+    except OSError:
+      sleep_time = 5
+    if sleep_time > 0:
+      logger.warning('Monitor just started? Sleeping %.1fs', sleep_time)
+      time.sleep(sleep_time)
+
+    assert os.path.exists(base_dir), '%s does not exist' % base_dir
+    ShutdownLogcatMonitor(base_dir, logger)
+    separator = '\n' + '*' * 80 + '\n\n'
+    for log in GetDeviceLogs(FindLogFiles(base_dir), logger):
+      output_file.write(log)
+      output_file.write(separator)
+    with open(os.path.join(base_dir, 'eventlog')) as f:
+      output_file.write('\nLogcat Monitor Event Log\n')
+      output_file.write(f.read())
+  except:
+    logger.exception('Unexpected exception')
+
+  logger.info('Done.')
+  sh.flush()
+  output_file.write('\nLogcat Printer Event Log\n')
+  output_file.write(log_stringio.getvalue())
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
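A worked example of the splice in CombineLogFiles, on toy data: when the last
timestamped line of the accumulated log also appears in the next capture, the
next capture is appended starting at that matched line, so anything it repeats
from before the overlap point is dropped.

    import logging

    # Toy captures: the second file starts at the first file's last line.
    first = ['01-02 03:04:05.678 I/tag( 42): a',
             '01-02 03:04:05.679 I/tag( 42): b']
    second = ['01-02 03:04:05.679 I/tag( 42): b',
              '01-02 03:04:05.680 I/tag( 42): c']

    log = CombineLogFiles([('logcat_X_000', first), ('logcat_X_001', second)],
                          logging.getLogger('LogcatPrinter'))
    # 'a' and 'b' come from the first capture, then the per-file separator
    # line, then the second capture from its matched line onward.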
diff --git a/build/android/adb_profile_chrome b/build/android/adb_profile_chrome
new file mode 100755
index 0000000..d3244ff
--- /dev/null
+++ b/build/android/adb_profile_chrome
@@ -0,0 +1,9 @@
+#!/bin/bash
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Start / stop profiling in chrome.
+CATAPULT_DIR="$(dirname "$0")"/../../third_party/catapult
+exec "${CATAPULT_DIR}"/systrace/bin/adb_profile_chrome "$@"
diff --git a/build/android/adb_profile_chrome_startup b/build/android/adb_profile_chrome_startup
new file mode 100755
index 0000000..d5836cd
--- /dev/null
+++ b/build/android/adb_profile_chrome_startup
@@ -0,0 +1,9 @@
+#!/bin/bash
+#
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Start / stop profiling for chrome startup.
+CATAPULT_DIR="$(dirname "$0")"/../../third_party/catapult
+exec "${CATAPULT_DIR}"/systrace/bin/adb_profile_chrome_startup "$@"
diff --git a/build/android/adb_reverse_forwarder.py b/build/android/adb_reverse_forwarder.py
new file mode 100755
index 0000000..b0a8dc3
--- /dev/null
+++ b/build/android/adb_reverse_forwarder.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Command line tool for forwarding ports from a device to the host.
+
+Allows an Android device to connect to services running on the host machine,
+i.e., "adb forward" in reverse. Requires |host_forwarder| and |device_forwarder|
+to be built.
+"""
+
+import optparse
+import sys
+import time
+
+import devil_chromium
+
+from devil.android import device_blacklist
+from devil.android import device_utils
+from devil.android import forwarder
+from devil.utils import run_tests_helper
+
+from pylib import constants
+
+
+def main(argv):
+  parser = optparse.OptionParser(usage='Usage: %prog [options] device_port '
+                                 'host_port [device_port_2 host_port_2] ...',
+                                 description=__doc__)
+  parser.add_option('-v',
+                    '--verbose',
+                    dest='verbose_count',
+                    default=0,
+                    action='count',
+                    help='Verbose level (multiple times for more)')
+  parser.add_option('--device',
+                    help='Serial number of device we should use.')
+  parser.add_option('--blacklist-file', help='Device blacklist JSON file.')
+  parser.add_option('--debug', action='store_const', const='Debug',
+                    dest='build_type', default='Release',
+                    help='Use Debug build of host tools instead of Release.')
+
+  options, args = parser.parse_args(argv)
+  run_tests_helper.SetLogLevel(options.verbose_count)
+
+  devil_chromium.Initialize()
+
+  if len(args) < 2 or not len(args) % 2:
+    parser.error('Need an even number of ports')
+    sys.exit(1)
+
+  try:
+    port_pairs = [int(a) for a in args[1:]]
+    port_pairs = zip(port_pairs[::2], port_pairs[1::2])
+  except ValueError:
+    parser.error('Bad port number')
+    sys.exit(1)
+
+  blacklist = (device_blacklist.Blacklist(options.blacklist_file)
+               if options.blacklist_file
+               else None)
+  device = device_utils.DeviceUtils.HealthyDevices(
+      blacklist=blacklist, device_arg=options.device)[0]
+  constants.SetBuildType(options.build_type)
+  try:
+    forwarder.Forwarder.Map(port_pairs, device)
+    while True:
+      time.sleep(60)
+  except KeyboardInterrupt:
+    sys.exit(0)
+  finally:
+    forwarder.Forwarder.UnmapAllDevicePorts(device)
+
+if __name__ == '__main__':
+  main(sys.argv)
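The slicing above turns the flat port list into (device_port, host_port)
pairs; for example:

    args = ['adb_reverse_forwarder.py', '8080', '80', '8443', '443']
    ports = [int(a) for a in args[1:]]
    port_pairs = zip(ports[::2], ports[1::2])  # [(8080, 80), (8443, 443)]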
diff --git a/build/android/adb_run_android_webview_shell b/build/android/adb_run_android_webview_shell
new file mode 100755
index 0000000..1014a73
--- /dev/null
+++ b/build/android/adb_run_android_webview_shell
@@ -0,0 +1,12 @@
+#!/bin/bash
+#
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+optional_url=$1
+
+adb shell am start \
+  -a android.intent.action.VIEW \
+  -n org.chromium.android_webview.shell/.AwShellActivity \
+  ${optional_url:+-d "$optional_url"}
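The ${optional_url:+-d "$optional_url"} expansion adds the -d data URI only
when a URL was passed. An equivalent sketch in Python (the activity name is
taken from the script above; everything else is illustrative):

    import subprocess

    def run_webview_shell(url=None):
      cmd = ['adb', 'shell', 'am', 'start',
             '-a', 'android.intent.action.VIEW',
             '-n', 'org.chromium.android_webview.shell/.AwShellActivity']
      if url:  # Mirrors ${optional_url:+-d "$optional_url"}.
        cmd += ['-d', url]
      subprocess.check_call(cmd)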
diff --git a/build/android/adb_run_blimp_client b/build/android/adb_run_blimp_client
new file mode 100755
index 0000000..4b3b4a8
--- /dev/null
+++ b/build/android/adb_run_blimp_client
@@ -0,0 +1,12 @@
+#!/bin/bash
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+optional_url=$1
+
+adb shell am start \
+  -a android.intent.action.VIEW \
+  -n org.chromium.blimp/org.chromium.blimp.BlimpRendererActivity \
+  ${optional_url:+-d "$optional_url"}
diff --git a/build/android/adb_run_chrome_public b/build/android/adb_run_chrome_public
new file mode 100755
index 0000000..bf15071
--- /dev/null
+++ b/build/android/adb_run_chrome_public
@@ -0,0 +1,12 @@
+#!/bin/bash
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+optional_url=$1
+
+adb shell am start \
+  -a android.intent.action.VIEW \
+  -n org.chromium.chrome/com.google.android.apps.chrome.Main \
+  ${optional_url:+-d "$optional_url"}
diff --git a/build/android/adb_run_content_shell b/build/android/adb_run_content_shell
new file mode 100755
index 0000000..3f01f3b
--- /dev/null
+++ b/build/android/adb_run_content_shell
@@ -0,0 +1,12 @@
+#!/bin/bash
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+optional_url=$1
+
+adb shell am start \
+  -a android.intent.action.VIEW \
+  -n org.chromium.content_shell_apk/.ContentShellActivity \
+  ${optional_url:+-d "$optional_url"}
diff --git a/build/android/adb_run_mojo_shell b/build/android/adb_run_mojo_shell
new file mode 100755
index 0000000..b585e4a
--- /dev/null
+++ b/build/android/adb_run_mojo_shell
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+optional_url=$1
+parameters=$2
+
+adb logcat -c
+adb shell am start -S \
+  -a android.intent.action.VIEW \
+  -n org.chromium.mojo_shell_apk/.MojoShellActivity \
+  ${parameters:+--esa parameters "$parameters"} \
+  ${optional_url:+-d "$optional_url"}
+adb logcat -s MojoShellApplication MojoShellActivity chromium
diff --git a/build/android/adb_run_system_webview_shell b/build/android/adb_run_system_webview_shell
new file mode 100755
index 0000000..5d0c0e4
--- /dev/null
+++ b/build/android/adb_run_system_webview_shell
@@ -0,0 +1,15 @@
+#!/bin/bash
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Runs a 'mini-browser' using System WebView with an optional url as parameter.
+# SystemWebViewShell.apk should be installed for this to work.
+
+optional_url=$1
+
+adb shell am start \
+  -a android.intent.action.VIEW \
+  -n org.chromium.webview_shell/.WebViewBrowserActivity \
+  ${optional_url:+-d "$optional_url"}
diff --git a/build/android/adb_system_webview_command_line b/build/android/adb_system_webview_command_line
new file mode 100755
index 0000000..376b0b3
--- /dev/null
+++ b/build/android/adb_system_webview_command_line
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# If no flags are given, prints the current System WebView flags.
+#
+# Otherwise, the given flags are used to REPLACE (not modify) the System
+# WebView flags. For example:
+#   adb_system_webview_command_line --enable-webgl
+#
+# To remove all System WebView flags, pass an empty string for the flags:
+#   adb_system_webview_command_line ""
+
+exec $(dirname $0)/adb_command_line.py --device-path \
+    /data/local/tmp/webview-command-line "$@"
diff --git a/build/android/android.isolate b/build/android/android.isolate
new file mode 100644
index 0000000..dfedc6f
--- /dev/null
+++ b/build/android/android.isolate
@@ -0,0 +1,29 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+  'variables': {
+    'files': [
+      '../../build/util/lib/common/',
+      '../../third_party/android_tools/sdk/build-tools/',
+      '../../third_party/android_tools/sdk/platform-tools/',
+      '../../third_party/appurify-python/',
+      '../../third_party/catapult/',
+      '../../third_party/requests/',
+      '../../tools/swarming_client/',
+      '<(PRODUCT_DIR)/icudtl.dat',
+      '<(PRODUCT_DIR)/lib.java/chromium_commands.dex.jar',
+      '<(PRODUCT_DIR)/host_forwarder',
+      '<(PRODUCT_DIR)/forwarder_dist/',
+      '<(PRODUCT_DIR)/md5sum_bin_host',
+      '<(PRODUCT_DIR)/md5sum_dist/',
+      'devil_chromium.json',
+      'devil_chromium.py',
+      'gyp/util/',
+      'incremental_install/',
+      'lighttpd_server.py',
+      'pylib/',
+      'test_runner.py',
+    ]
+  }
+}
diff --git a/build/android/android_lint_cache.gyp b/build/android/android_lint_cache.gyp
new file mode 100644
index 0000000..72b9e9e
--- /dev/null
+++ b/build/android/android_lint_cache.gyp
@@ -0,0 +1,51 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'targets': [
+    {
+      # This target runs a functionally empty lint to create or update the
+      # API versions cache if necessary. This prevents racy creation of the
+      # cache while linting java targets in lint_action.gypi.
+      'target_name': 'android_lint_cache',
+      'type': 'none',
+      'actions': [
+        {
+          'action_name': 'prepare_android_lint_cache',
+          'message': 'Preparing Android lint cache',
+          'variables': {
+            'android_lint_cache_stamp': '<(PRODUCT_DIR)/android_lint_cache/android_lint_cache.stamp',
+            'android_manifest_path': '<(DEPTH)/build/android/AndroidManifest.xml',
+            'result_path': '<(PRODUCT_DIR)/android_lint_cache/result.xml',
+            'platform_xml_path': '<(android_sdk_root)/platform-tools/api/api-versions.xml',
+          },
+          'inputs': [
+            '<(DEPTH)/build/android/gyp/util/build_utils.py',
+            '<(DEPTH)/build/android/gyp/lint.py',
+            '<(android_manifest_path)',
+            '<(platform_xml_path)',
+          ],
+          'outputs': [
+            '<(android_lint_cache_stamp)',
+            '<(result_path)',
+          ],
+          'action': [
+            'python', '<(DEPTH)/build/android/gyp/lint.py',
+            '--lint-path', '<(android_sdk_root)/tools/lint',
+            '--cache-dir', '<(PRODUCT_DIR)/android_lint_cache',
+            '--android-sdk-version=<(android_sdk_version)',
+            '--platform-xml-path', '<(platform_xml_path)',
+            '--manifest-path', '<(android_manifest_path)',
+            '--product-dir', '<(PRODUCT_DIR)',
+            '--result-path', '<(result_path)',
+            '--stamp', '<(android_lint_cache_stamp)',
+            '--create-cache',
+            '--silent',
+            '--enable'
+          ],
+        },
+      ],
+    },
+  ],
+}
diff --git a/build/android/android_no_jni_exports.lst b/build/android/android_no_jni_exports.lst
new file mode 100644
index 0000000..ffc6cf7
--- /dev/null
+++ b/build/android/android_no_jni_exports.lst
@@ -0,0 +1,17 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script makes all JNI exported symbols local, to prevent the JVM from
+# being able to find them, enforcing use of manual JNI function registration.
+# This is used for all Android binaries by default, unless they explicitly state
+# that they want JNI exported symbols to remain visible, as we need to ensure
+# the manual registration path is correct to maintain compatibility with the
+# crazy linker.
+# Check ld version script manual:
+# https://sourceware.org/binutils/docs-2.24/ld/VERSION.html#VERSION
+
+{
+  local:
+    Java_*;
+};
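A quick way to check that the version script took effect is to dump the
library's dynamic symbol table and confirm no Java_* entries are exported.
A hedged sketch (the library path is hypothetical, and for a cross-built .so
you would invoke the Android toolchain's nm rather than the host one):

    import subprocess

    def exported_jni_symbols(so_path):
      # 'nm -D' prints only the dynamic (exported) symbols.
      out = subprocess.check_output(['nm', '-D', so_path])
      return [l for l in out.splitlines() if 'Java_' in l]

    # Expect an empty list for a library linked with android_no_jni_exports.lst.
    print exported_jni_symbols('out/Release/lib.unstripped/libchrome.so')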
diff --git a/build/android/ant/apk-package.xml b/build/android/ant/apk-package.xml
new file mode 100644
index 0000000..cb79560
--- /dev/null
+++ b/build/android/ant/apk-package.xml
@@ -0,0 +1,125 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+    Copyright (C) 2005-2008 The Android Open Source Project
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+         http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+
+<project default="-package">
+  <property name="verbose" value="false" />
+  <property name="out.dir" location="${OUT_DIR}" />
+  <property name="out.absolute.dir" location="${out.dir}" />
+
+  <property name="sdk.dir" location="${ANDROID_SDK_ROOT}"/>
+  <property name="emma.device.jar" location="${EMMA_DEVICE_JAR}" />
+
+  <condition property="emma.enabled" value="true" else="false">
+    <equals arg1="${EMMA_INSTRUMENT}" arg2="1"/>
+  </condition>
+
+  <!-- jar file from where the tasks are loaded -->
+  <path id="android.antlibs">
+    <pathelement path="${sdk.dir}/tools/lib/ant-tasks.jar" />
+  </path>
+
+  <!-- Custom tasks -->
+  <taskdef resource="anttasks.properties" classpathref="android.antlibs" />
+
+  <condition property="build.target" value="release" else="debug">
+    <equals arg1="${CONFIGURATION_NAME}" arg2="Release" />
+  </condition>
+  <condition property="build.is.packaging.debug" value="true" else="false">
+    <equals arg1="${build.target}" arg2="debug" />
+  </condition>
+
+  <!-- Disables automatic signing. -->
+  <property name="build.is.signing.debug" value="false"/>
+
+  <!-- SDK tools assume that out.packaged.file is signed and name it "...-unaligned" -->
+  <property name="out.packaged.file" value="${UNSIGNED_APK_PATH}" />
+
+  <property name="native.libs.absolute.dir" location="${NATIVE_LIBS_DIR}" />
+
+  <!-- Intermediate files -->
+  <property name="resource.package.file.name" value="${RESOURCE_PACKAGED_APK_NAME}" />
+
+  <property name="intermediate.dex.file" location="${DEX_FILE_PATH}" />
+  <condition property="multidex.enabled" value="true">
+    <equals arg1="${MULTIDEX_ENABLED}" arg2="1"/>
+  </condition>
+
+  <!-- Macro that enables passing a variable list of external jar files
+       to ApkBuilder. -->
+  <macrodef name="package-helper">
+    <element name="extra-jars" optional="yes" />
+    <sequential>
+      <apkbuilder
+          outfolder="${out.absolute.dir}"
+          resourcefile="${resource.package.file.name}"
+          apkfilepath="${out.packaged.file}"
+          debugpackaging="${build.is.packaging.debug}"
+          debugsigning="${build.is.signing.debug}"
+          verbose="${verbose}"
+          hascode="${HAS_CODE}"
+          previousBuildType="/"
+          buildType="${build.is.packaging.debug}/${build.is.signing.debug}">
+        <dex path="${intermediate.dex.file}" />
+        <nativefolder path="${native.libs.absolute.dir}" />
+        <extra-jars/>
+      </apkbuilder>
+    </sequential>
+  </macrodef>
+
+  <macrodef name="multidex-package-helper">
+    <element name="extra-jars" optional="yes" />
+    <sequential>
+      <apkbuilder
+          outfolder="${out.absolute.dir}"
+          resourcefile="${resource.package.file.name}"
+          apkfilepath="${out.packaged.file}"
+          debugpackaging="${build.is.packaging.debug}"
+          debugsigning="${build.is.signing.debug}"
+          verbose="${verbose}"
+          hascode="false"
+          previousBuildType="/"
+          buildType="${build.is.packaging.debug}/${build.is.signing.debug}">
+        <zip path="${intermediate.dex.file}" />
+        <nativefolder path="${native.libs.absolute.dir}" />
+        <extra-jars/>
+      </apkbuilder>
+    </sequential>
+  </macrodef>
+
+  <!-- Packages the application. -->
+  <target name="-package">
+    <if condition="${emma.enabled}">
+      <then>
+        <package-helper>
+          <extra-jars>
+            <jarfile path="${emma.device.jar}" />
+          </extra-jars>
+        </package-helper>
+      </then>
+      <else>
+        <if condition="${multidex.enabled}">
+          <then>
+            <multidex-package-helper />
+          </then>
+          <else>
+            <package-helper />
+          </else>
+        </if>
+      </else>
+    </if>
+  </target>
+</project>
diff --git a/build/android/ant/chromium-debug.keystore b/build/android/ant/chromium-debug.keystore
new file mode 100644
index 0000000..67eb0aa
--- /dev/null
+++ b/build/android/ant/chromium-debug.keystore
Binary files differ
diff --git a/build/android/ant/empty/res/.keep b/build/android/ant/empty/res/.keep
new file mode 100644
index 0000000..1fd038b
--- /dev/null
+++ b/build/android/ant/empty/res/.keep
@@ -0,0 +1,2 @@
+# This empty res folder can be passed to aapt while building Java libraries or
+# APKs that don't have any resources.
diff --git a/build/android/apkbuilder_action.gypi b/build/android/apkbuilder_action.gypi
new file mode 100644
index 0000000..e073e9b
--- /dev/null
+++ b/build/android/apkbuilder_action.gypi
@@ -0,0 +1,84 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is a helper to java_apk.gypi. It should be used to create an
+# action that runs ApkBuilder via ANT.
+#
+# Required variables:
+#  apk_name - File name (minus path & extension) of the output apk.
+#  apk_path - Path to output apk.
+#  package_input_paths - Late-evaluated list of resource zips.
+#  native_libs_dir - Path to lib/ directory to use. Set to an empty directory
+#    if no native libs are needed.
+# Optional variables:
+#  has_code - Whether to include classes.dex in the apk.
+#  dex_path - Path to classes.dex. Used only when has_code=1.
+#  extra_inputs - List of extra action inputs.
+{
+  'variables': {
+    'variables': {
+      'has_code%': 1,
+    },
+    'conditions': [
+      ['has_code == 0', {
+        'has_code_str': 'false',
+      }, {
+        'has_code_str': 'true',
+      }],
+    ],
+    'has_code%': '<(has_code)',
+    'extra_inputs%': [],
+    # Write the inputs list to a file, so that its mtime is updated when
+    # the list of inputs changes.
+    'inputs_list_file': '>|(apk_package.<(_target_name).<(apk_name).gypcmd >@(package_input_paths))',
+    'resource_packaged_apk_name': '<(apk_name)-resources.ap_',
+    'resource_packaged_apk_path': '<(intermediate_dir)/<(resource_packaged_apk_name)',
+  },
+  'action_name': 'apkbuilder_<(apk_name)',
+  'message': 'Packaging <(apk_name)',
+  'inputs': [
+    '<(DEPTH)/build/android/ant/apk-package.xml',
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(DEPTH)/build/android/gyp/ant.py',
+    '<(resource_packaged_apk_path)',
+    '<@(extra_inputs)',
+    '>@(package_input_paths)',
+    '>(inputs_list_file)',
+  ],
+  'outputs': [
+    '<(apk_path)',
+  ],
+  'conditions': [
+    ['has_code == 1', {
+      'inputs': ['<(dex_path)'],
+      'action': [
+        '-DDEX_FILE_PATH=<(dex_path)',
+      ]
+    }],
+    ['enable_multidex == 1', {
+      'action': [
+        '-DMULTIDEX_ENABLED=1',
+      ]
+    }]
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/ant.py',
+    '--',
+    '-quiet',
+    '-DHAS_CODE=<(has_code_str)',
+    '-DANDROID_SDK_ROOT=<(android_sdk_root)',
+    '-DANDROID_SDK_TOOLS=<(android_sdk_tools)',
+    '-DRESOURCE_PACKAGED_APK_NAME=<(resource_packaged_apk_name)',
+    '-DNATIVE_LIBS_DIR=<(native_libs_dir)',
+    '-DAPK_NAME=<(apk_name)',
+    '-DCONFIGURATION_NAME=<(CONFIGURATION_NAME)',
+    '-DOUT_DIR=<(intermediate_dir)',
+    '-DUNSIGNED_APK_PATH=<(apk_path)',
+    '-DEMMA_INSTRUMENT=<(emma_instrument)',
+    '-DEMMA_DEVICE_JAR=<(emma_device_jar)',
+    '-Dbasedir=.',
+    '-buildfile',
+    '<(DEPTH)/build/android/ant/apk-package.xml',
+  ]
+}
diff --git a/build/android/apksize.py b/build/android/apksize.py
new file mode 100755
index 0000000..ae5462b
--- /dev/null
+++ b/build/android/apksize.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import collections
+import json
+import logging
+import os
+import sys
+import zipfile
+
+_BASE_CHART = {
+    'format_version': '0.1',
+    'benchmark_name': 'apk_size',
+    'benchmark_description': 'APK size information.',
+    'trace_rerun_options': [],
+    'charts': {}
+}
+
+
+# TODO(rnephew): Add support for split apks.
+class ApkSizeInfo(object):
+
+  def __init__(self, path):
+    """ApkSizeInfo constructor.
+
+    Args:
+      path: Path to apk.
+    """
+    if not os.path.isfile(path):
+      raise IOError('Not a valid file path for apk.')
+    if not os.access(path, os.R_OK):
+      raise IOError('File is not readable.')
+    if not zipfile.is_zipfile(path):
+      raise TypeError('Not a valid apk')
+    logging.info('APK: %s', path)
+    self._apk_size = os.path.getsize(path)
+    self._zipfile = zipfile.ZipFile(path, 'r')
+    self._processed_files = None
+    self._compressed_size = 0
+    self._total_files = 0
+    self._uncompressed_size = 0
+    self._ProcessFiles()
+
+  def _ProcessFiles(self):
+    """Uses zipinfo to process apk file information."""
+    INITIAL_FILE_EXTENSION_INFO = {
+        'number': 0,
+        'compressed_bytes': 0,
+        'uncompressed_bytes': 0
+    }
+    self._processed_files = collections.defaultdict(
+        lambda: dict(INITIAL_FILE_EXTENSION_INFO))
+
+    for f in self._zipfile.infolist():
+      _, file_ext = os.path.splitext(f.filename)
+      file_ext = file_ext[1:] # Drop . from extension.
+
+      self._compressed_size += f.compress_size
+      self._total_files += 1
+      self._uncompressed_size += f.file_size
+      self._processed_files[file_ext]['number'] += 1
+      self._processed_files[file_ext]['compressed_bytes'] += f.compress_size
+      self._processed_files[file_ext]['uncompressed_bytes'] += f.file_size
+    return self._processed_files
+
+  def Compare(self, other_apk):
+    """Compares size information of two apks.
+
+    Args:
+      other_apk: ApkSizeInfo instance to compare size against.
+
+    Returns:
+      Dictionary of comparison results.
+    """
+    if not isinstance(other_apk, type(self)):
+      raise TypeError('Must pass it an ApkSizeInfo object')
+
+    other_lib_compressed = other_apk.processed_files['so']['compressed_bytes']
+    other_lib_uncompressed = (
+        other_apk.processed_files['so']['uncompressed_bytes'])
+    this_lib_compressed = self._processed_files['so']['compressed_bytes']
+    this_lib_uncompressed = self._processed_files['so']['uncompressed_bytes']
+
+    # TODO(rnephew): This will be made obsolete once modern and legacy apks
+    # are separate; a new comparison method will be required eventually.
+    return collections.OrderedDict([
+        ('APK_size_reduction',
+            other_apk.compressed_size - self.compressed_size),
+        ('ARM32_Legacy_install_or_upgrade_reduction',
+            (other_lib_compressed - this_lib_compressed) +
+            (other_lib_uncompressed - this_lib_uncompressed)),
+        ('ARM32_Legacy_system_image_reduction',
+            other_lib_compressed - this_lib_compressed),
+        ('ARM32_Modern_ARM64_install_or_upgrade_reduction',
+            other_lib_uncompressed - this_lib_uncompressed),
+        ('ARM32_Modern_ARM64_system_image_reduction',
+            other_lib_uncompressed - this_lib_uncompressed),
+    ])
+
+  @property
+  def apk_size(self):
+    return self._apk_size
+
+  @property
+  def compressed_size(self):
+    return self._compressed_size
+
+  @property
+  def total_files(self):
+    return self._total_files
+
+  @property
+  def uncompressed_size(self):
+    return self._uncompressed_size
+
+  @property
+  def processed_files(self):
+    return self._processed_files
+
+def add_value(chart_data, graph_title, trace_title, value, units,
+              improvement_direction='down', important=True):
+  chart_data['charts'].setdefault(graph_title, {})
+  chart_data['charts'][graph_title][trace_title] = {
+      'type': 'scalar',
+      'value': value,
+      'units': units,
+      'improvement_direction': improvement_direction,
+      'important': important
+  }
+
+def chartjson_size_info(apk, output_dir):
+  """Sends size information to perf dashboard.
+
+  Args:
+    apk: ApkSizeInfo object
+  """
+  data = _BASE_CHART.copy()
+  files = apk.processed_files
+  add_value(data, 'files', 'total', apk.total_files, 'count')
+  add_value(data, 'size', 'total_size_compressed', apk.compressed_size, 'bytes')
+  add_value(data, 'size', 'total_size_uncompressed', apk.uncompressed_size,
+            'bytes')
+  add_value(data, 'size', 'apk_overhead', apk.apk_size - apk.compressed_size,
+           'bytes')
+  for ext in files:
+    add_value(data, 'files', ext, files[ext]['number'], 'count')
+    add_value(data, 'size_compressed', ext, files[ext]['compressed_bytes'],
+              'bytes')
+    add_value(data, 'size_uncompressed', ext, files[ext]['uncompressed_bytes'],
+              'bytes')
+
+  logging.info('Outputting chartjson data to %s', output_dir)
+  with open(os.path.join(output_dir, 'results-chart.json'), 'w') as json_file:
+    json.dump(data, json_file)
+
+def print_human_readable_size_info(apk):
+  """Prints size information in human readable format.
+
+  Args:
+    apk: ApkSizeInfo object
+  """
+  files = apk.processed_files
+  logging.critical('Stats for files as they exist within the apk:')
+  for ext in files:
+    logging.critical('  %-8s %s bytes in %s files', ext,
+                     files[ext]['compressed_bytes'], files[ext]['number'])
+  logging.critical('--------------------------------------')
+  logging.critical(
+      'All Files: %s bytes in %s files', apk.compressed_size, apk.total_files)
+  logging.critical('APK Size: %s', apk.apk_size)
+  logging.critical('APK overhead: %s', apk.apk_size - apk.compressed_size)
+  logging.critical('--------------------------------------')
+  logging.critical('Stats for files when extracted from the apk:')
+  for ext in files:
+    logging.critical('  %-8s %s bytes in %s files', ext,
+                     files[ext]['uncompressed_bytes'], files[ext]['number'])
+  logging.critical('--------------------------------------')
+  logging.critical(
+      'All Files: %s bytes in %s files', apk.uncompressed_size, apk.total_files)
+
+def chartjson_compare(compare_dict, output_dir):
+  """Sends size comparison between two apks to perf dashboard.
+
+  Args:
+    compare_dict: Dictionary returned from APkSizeInfo.Compare()
+  """
+  data = _BASE_CHART.copy()
+  for key, value in compare_dict.iteritems():
+    add_value(data, 'compare', key, value, 'bytes')
+
+  logging.info('Outputting chartjson data to %s', output_dir)
+  with open(os.path.join(output_dir, 'results-chart.json'), 'w') as json_file:
+    json.dump(data, json_file)
+
+def print_human_readable_compare(compare_dict):
+  """Prints size comparison between two apks in human readable format.
+
+  Args:
+    compare_dict: Dictionary returned from ApkSizeInfo.Compare()
+  """
+  for key, value in compare_dict.iteritems():
+    logging.critical('  %-50s %s bytes', key, value)
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('file_path')
+  parser.add_argument('-c', '--compare', help='APK to compare against.')
+  parser.add_argument('-o', '--output-dir',
+                      help='Directory in which to write chartjson results.')
+  parser.add_argument('-d', '--device', help='Dummy option for perf runner.')
+  args = parser.parse_args()
+
+  apk = ApkSizeInfo(args.file_path)
+  if args.compare:
+    compare_dict = apk.Compare(ApkSizeInfo(args.compare))
+    print_human_readable_compare(compare_dict)
+    if args.output_dir:
+      chartjson_compare(compare_dict, args.output_dir)
+  else:
+    print_human_readable_size_info(apk)
+    if args.output_dir:
+      chartjson_size_info(apk, args.output_dir)
+
+if __name__ == '__main__':
+  sys.exit(main())
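A usage sketch for the module above (the APK path is hypothetical); each chart
entry produced by add_value carries the shape shown in the comment:

    apk = ApkSizeInfo('out/Release/apks/ChromePublic.apk')  # hypothetical path
    print apk.total_files, apk.compressed_size, apk.uncompressed_size

    data = _BASE_CHART.copy()
    add_value(data, 'size', 'total_size_compressed', apk.compressed_size, 'bytes')
    # data['charts']['size']['total_size_compressed'] ==
    #   {'type': 'scalar', 'value': <bytes>, 'units': 'bytes',
    #    'improvement_direction': 'down', 'important': True}

Note that _BASE_CHART.copy() is shallow, so 'charts' is shared across copies;
the script only builds one chart dictionary per run, so this does not bite
here.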
diff --git a/build/android/asan_symbolize.py b/build/android/asan_symbolize.py
new file mode 100755
index 0000000..d709f7e
--- /dev/null
+++ b/build/android/asan_symbolize.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import collections
+import optparse
+import os
+import re
+import sys
+
+from pylib import constants
+from pylib.constants import host_paths
+
+# Uses symbol.py from third_party/android_platform, not python's.
+with host_paths.SysPath(
+    host_paths.ANDROID_PLATFORM_DEVELOPMENT_SCRIPTS_PATH,
+    position=0):
+  import symbol
+
+
+_RE_ASAN = re.compile(r'(.*?)(#\S*?)\s+(\S*?)\s+\((.*?)\+(.*?)\)')
+
+def _ParseAsanLogLine(line):
+  m = re.match(_RE_ASAN, line)
+  if not m:
+    return None
+  return {
+      'prefix': m.group(1),
+      'library': m.group(4),
+      'pos': m.group(2),
+      'rel_address': '%08x' % int(m.group(5), 16),
+  }
+
+
+def _FindASanLibraries():
+  asan_lib_dir = os.path.join(host_paths.DIR_SOURCE_ROOT,
+                              'third_party', 'llvm-build',
+                              'Release+Asserts', 'lib')
+  asan_libs = []
+  for src_dir, _, files in os.walk(asan_lib_dir):
+    asan_libs += [os.path.relpath(os.path.join(src_dir, f))
+                  for f in files
+                  if f.endswith('.so')]
+  return asan_libs
+
+
+def _TranslateLibPath(library, asan_libs):
+  for asan_lib in asan_libs:
+    if os.path.basename(library) == os.path.basename(asan_lib):
+      return '/' + asan_lib
+  # pylint: disable=no-member
+  return symbol.TranslateLibPath(library)
+
+
+def _Symbolize(asan_input):
+  asan_libs = _FindASanLibraries()
+  libraries = collections.defaultdict(list)
+  asan_lines = []
+  for asan_log_line in [a.rstrip() for a in asan_input]:
+    m = _ParseAsanLogLine(asan_log_line)
+    if m:
+      libraries[m['library']].append(m)
+    asan_lines.append({'raw_log': asan_log_line, 'parsed': m})
+
+  all_symbols = collections.defaultdict(dict)
+  for library, items in libraries.iteritems():
+    libname = _TranslateLibPath(library, asan_libs)
+    lib_relative_addrs = set([i['rel_address'] for i in items])
+    # pylint: disable=no-member
+    info_dict = symbol.SymbolInformationForSet(libname,
+                                               lib_relative_addrs,
+                                               True)
+    if info_dict:
+      all_symbols[library]['symbols'] = info_dict
+
+  for asan_log_line in asan_lines:
+    m = asan_log_line['parsed']
+    if not m:
+      print asan_log_line['raw_log']
+      continue
+    if (m['library'] in all_symbols and
+        m['rel_address'] in all_symbols[m['library']]['symbols']):
+      s = all_symbols[m['library']]['symbols'][m['rel_address']][0]
+      print '%s%s %s %s' % (m['prefix'], m['pos'], s[0], s[1])
+    else:
+      print asan_log_line['raw_log']
+
+
+def main():
+  parser = optparse.OptionParser()
+  parser.add_option('-l', '--logcat',
+                    help='File containing adb logcat output with ASan stacks. '
+                         'Use stdin if not specified.')
+  parser.add_option('--output-directory',
+                    help='Path to the root build directory.')
+  options, _ = parser.parse_args()
+
+  if options.output_directory:
+    constants.SetOutputDirectory(options.output_directory)
+  # Do an up-front test that the output directory is known.
+  constants.CheckOutputDirectory()
+
+  if options.logcat:
+    asan_input = open(options.logcat, 'r')
+  else:
+    asan_input = sys.stdin
+  _Symbolize(asan_input.readlines())
+
+
+if __name__ == "__main__":
+  sys.exit(main())
diff --git a/build/android/avd.py b/build/android/avd.py
new file mode 100755
index 0000000..788ceaf
--- /dev/null
+++ b/build/android/avd.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Launches Android Virtual Devices with a set configuration for testing Chrome.
+
+The script will launch a specified number of Android Virtual Devices (AVDs).
+"""
+
+import argparse
+import logging
+import os
+import re
+import sys
+
+import devil_chromium
+import install_emulator_deps
+
+from devil.utils import cmd_helper
+from pylib import constants
+from pylib.utils import emulator
+
+def main(argv):
+  # ANDROID_SDK_ROOT needs to be set to the location of the SDK used to launch
+  # the emulator to find the system images upon launch.
+  emulator_sdk = constants.ANDROID_SDK_ROOT
+  os.environ['ANDROID_SDK_ROOT'] = emulator_sdk
+
+  arg_parser = argparse.ArgumentParser(description='AVD script.')
+  sub_parsers = arg_parser.add_subparsers(title='subparser', dest='command')
+  sub_parsers.add_parser(
+      'kill', help='Shut down all existing emulators')
+  sub_parsers.add_parser(
+      'delete', help='Delete all AVD files')
+  wait_parser = sub_parsers.add_parser(
+      'wait', help='Wait for emulators to finish booting')
+  wait_parser.add_argument('-n', '--num', dest='wait_num',
+                           help='Number of emulators to wait for', type=int,
+                           default=1)
+  run_parser = sub_parsers.add_parser('run', help='Run emulators')
+  run_parser.add_argument('--name', help='Optionally, name of existing AVD to '
+                          'launch. If not specified, AVDs will be created.')
+  run_parser.add_argument('-n', '--num', dest='emulator_count',
+                          help='Number of emulators to launch (default is 1).',
+                          type=int, default=1)
+  run_parser.add_argument('--abi', default='x86',
+                          help='Platform of emulators to launch (x86 default)')
+  run_parser.add_argument('--api-level', dest='api_level',
+                          help='API level for the image',
+                          type=int, default=constants.ANDROID_SDK_VERSION)
+  run_parser.add_argument('--sdcard-size', dest='sdcard_size',
+                          default=emulator.DEFAULT_SDCARD_SIZE,
+                          help='Set sdcard size of the emulators'
+                          ' e.g. --sdcard-size=512M')
+  run_parser.add_argument('--partition-size', dest='partition_size',
+                          default=emulator.DEFAULT_STORAGE_SIZE,
+                          help='Default internal storage size'
+                          ' e.g. --partition-size=1024M')
+  run_parser.add_argument('--launch-without-kill', action='store_false',
+                          dest='kill_and_launch', default=True,
+                          help='Launch without killing existing emulators.')
+  run_parser.add_argument('--enable-kvm', action='store_true',
+                          dest='enable_kvm', default=False,
+                          help='Enable kvm for faster x86 emulator run')
+  run_parser.add_argument('--headless', action='store_true',
+                          dest='headless', default=False,
+                          help='Launch an emulator with no UI.')
+
+  arguments = arg_parser.parse_args(argv[1:])
+
+  logging.root.setLevel(logging.INFO)
+
+  devil_chromium.Initialize()
+
+  if arguments.command == 'kill':
+    logging.info('Killing all existing emulators and exiting the program')
+    emulator.KillAllEmulators()
+  elif arguments.command == 'delete':
+    emulator.DeleteAllTempAVDs()
+  elif arguments.command == 'wait':
+    emulator.WaitForEmulatorLaunch(arguments.wait_num)
+  else:
+    # Check if the SDK exists in ANDROID_SDK_ROOT
+    if not install_emulator_deps.CheckSDK():
+      raise Exception('Emulator SDK not installed in %s'
+                       % constants.ANDROID_SDK_ROOT)
+
+    # Check if KVM is enabled for x86 AVD
+    if arguments.abi == 'x86':
+      if not install_emulator_deps.CheckKVM():
+        logging.warning('KVM is not installed or enabled')
+        arguments.enable_kvm = False
+
+    # Check if the targeted system image exists
+    if not install_emulator_deps.CheckSystemImage(arguments.abi,
+                                                  arguments.api_level):
+      logging.critical('ERROR: System image for %s AVD not installed. Run '
+                       'install_emulator_deps.py', arguments.abi)
+      return 1
+
+    # If AVD is specified, check that the SDK has the required target. If not,
+    # check that the SDK has the desired target for the temporary AVDs.
+    api_level = arguments.api_level
+    if arguments.name:
+      android = os.path.join(constants.ANDROID_SDK_ROOT, 'tools',
+                             'android')
+      avds_output = cmd_helper.GetCmdOutput([android, 'list', 'avd'])
+      names = re.findall(r'Name: (\w+)', avds_output)
+      api_levels = re.findall(r'API level (\d+)', avds_output)
+      try:
+        avd_index = names.index(arguments.name)
+      except ValueError:
+        logging.critical('ERROR: Specified AVD %s does not exist.',
+                         arguments.name)
+        return 1
+      api_level = int(api_levels[avd_index])
+
+    if not install_emulator_deps.CheckSDKPlatform(api_level):
+      logging.critical('ERROR: Emulator SDK missing required target for API %d.'
+                       ' Run install_emulator_deps.py.', api_level)
+      return 1
+
+    if arguments.name:
+      emulator.LaunchEmulator(
+          arguments.name,
+          arguments.abi,
+          enable_kvm=arguments.enable_kvm,
+          kill_and_launch=arguments.kill_and_launch,
+          sdcard_size=arguments.sdcard_size,
+          storage_size=arguments.partition_size,
+          headless=arguments.headless
+      )
+    else:
+      emulator.LaunchTempEmulators(
+          arguments.emulator_count,
+          arguments.abi,
+          arguments.api_level,
+          enable_kvm=arguments.enable_kvm,
+          kill_and_launch=arguments.kill_and_launch,
+          sdcard_size=arguments.sdcard_size,
+          storage_size=arguments.partition_size,
+          wait_for_boot=True,
+          headless=arguments.headless
+      )
+    logging.info('Emulator launch completed')
+  return 0
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/build/android/buildbot/bb_annotations.py b/build/android/buildbot/bb_annotations.py
new file mode 100644
index 0000000..059d673
--- /dev/null
+++ b/build/android/buildbot/bb_annotations.py
@@ -0,0 +1,46 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Helper functions to print buildbot messages."""
+
+def PrintLink(label, url):
+  """Adds a link with name |label| linking to |url| to current buildbot step.
+
+  Args:
+    label: A string with the name of the label.
+    url: A string of the URL.
+  """
+  print '@@@STEP_LINK@%s@%s@@@' % (label, url)
+
+
+def PrintMsg(msg):
+  """Appends |msg| to the current buildbot step text.
+
+  Args:
+    msg: String to be appended.
+  """
+  print '@@@STEP_TEXT@%s@@@' % msg
+
+
+def PrintSummaryText(msg):
+  """Appends |msg| to main build summary. Visible from waterfall.
+
+  Args:
+    msg: String to be appended.
+  """
+  print '@@@STEP_SUMMARY_TEXT@%s@@@' % msg
+
+
+def PrintError():
+  """Marks the current step as failed."""
+  print '@@@STEP_FAILURE@@@'
+
+
+def PrintWarning():
+  """Marks the current step with a warning."""
+  print '@@@STEP_WARNINGS@@@'
+
+
+def PrintNamedStep(step):
+  """Starts a new buildbot step named |step|."""
+  print '@@@BUILD_STEP %s@@@' % step
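+
+
+# Illustrative sequence (hypothetical step name and URL): emits a named step,
+# attaches a link to it, and marks the step with a warning:
+#   PrintNamedStep('gpu_tests')
+#   PrintLink('results', 'https://example.com/results.html')
+#   PrintWarning()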
diff --git a/build/android/buildbot/bb_device_status_check.py b/build/android/buildbot/bb_device_status_check.py
new file mode 100755
index 0000000..5ea295e
--- /dev/null
+++ b/build/android/buildbot/bb_device_status_check.py
@@ -0,0 +1,429 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A class to keep track of devices across builds and report state."""
+
+import argparse
+import json
+import logging
+import os
+import psutil
+import re
+import signal
+import sys
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+import devil_chromium
+from devil import devil_env
+from devil.android import battery_utils
+from devil.android import device_blacklist
+from devil.android import device_errors
+from devil.android import device_list
+from devil.android import device_utils
+from devil.android.sdk import adb_wrapper
+from devil.constants import exit_codes
+from devil.utils import lsusb
+from devil.utils import reset_usb
+from devil.utils import run_tests_helper
+from pylib.constants import host_paths
+
+_RE_DEVICE_ID = re.compile(r'Device ID = (\d+)')
+
+
+def KillAllAdb():
+  def GetAllAdb():
+    for p in psutil.process_iter():
+      try:
+        if 'adb' in p.name:
+          yield p
+      except (psutil.NoSuchProcess, psutil.AccessDenied):
+        pass
+
+  for sig in [signal.SIGTERM, signal.SIGQUIT, signal.SIGKILL]:
+    for p in GetAllAdb():
+      try:
+        logging.info('kill %d %d (%s [%s])', sig, p.pid, p.name,
+                     ' '.join(p.cmdline))
+        p.send_signal(sig)
+      except (psutil.NoSuchProcess, psutil.AccessDenied):
+        pass
+  for p in GetAllAdb():
+    try:
+      logging.error('Unable to kill %d (%s [%s])', p.pid, p.name,
+                    ' '.join(p.cmdline))
+    except (psutil.NoSuchProcess, psutil.AccessDenied):
+      pass
+
+
+def _IsBlacklisted(serial, blacklist):
+  return blacklist and serial in blacklist.Read()
+
+
+def _BatteryStatus(device, blacklist):
+  battery_info = {}
+  try:
+    battery = battery_utils.BatteryUtils(device)
+    battery_info = battery.GetBatteryInfo(timeout=5)
+    battery_level = int(battery_info.get('level', 100))
+
+    if battery_level < 15:
+      logging.error('Critically low battery level (%d)', battery_level)
+      battery = battery_utils.BatteryUtils(device)
+      if not battery.GetCharging():
+        battery.SetCharging(True)
+      if blacklist:
+        blacklist.Extend([device.adb.GetDeviceSerial()], reason='low_battery')
+
+  except device_errors.CommandFailedError:
+    logging.exception('Failed to get battery information for %s',
+                      str(device))
+
+  return battery_info
+
+
+def _IMEISlice(device):
+  imei_slice = ''
+  try:
+    for l in device.RunShellCommand(['dumpsys', 'iphonesubinfo'],
+                                    check_return=True, timeout=5):
+      m = _RE_DEVICE_ID.match(l)
+      if m:
+        imei_slice = m.group(1)[-6:]
+  except device_errors.CommandFailedError:
+    logging.exception('Failed to get IMEI slice for %s', str(device))
+
+  return imei_slice
+
+
+def DeviceStatus(devices, blacklist):
+  """Generates status information for the given devices.
+
+  Args:
+    devices: The devices to generate status for.
+    blacklist: The current device blacklist.
+  Returns:
+    A dict of the following form:
+    {
+      '<serial>': {
+        'serial': '<serial>',
+        'adb_status': str,
+        'usb_status': bool,
+        'blacklisted': bool,
+        # only if the device is connected and not blacklisted
+        'type': ro.build.product,
+        'build': ro.build.id,
+        'build_detail': ro.build.fingerprint,
+        'battery': {
+          ...
+        },
+        'imei_slice': str,
+        'wifi_ip': str,
+      },
+      ...
+    }
+  """
+  adb_devices = {
+    a[0].GetDeviceSerial(): a
+    for a in adb_wrapper.AdbWrapper.Devices(desired_state=None, long_list=True)
+  }
+  usb_devices = set(lsusb.get_android_devices())
+
+  def blacklisting_device_status(device):
+    serial = device.adb.GetDeviceSerial()
+    adb_status = (
+        adb_devices[serial][1] if serial in adb_devices
+        else 'missing')
+    usb_status = bool(serial in usb_devices)
+
+    device_status = {
+      'serial': serial,
+      'adb_status': adb_status,
+      'usb_status': usb_status,
+    }
+
+    if not _IsBlacklisted(serial, blacklist):
+      if adb_status == 'device':
+        try:
+          build_product = device.build_product
+          build_id = device.build_id
+          build_fingerprint = device.GetProp('ro.build.fingerprint', cache=True)
+          wifi_ip = device.GetProp('dhcp.wlan0.ipaddress')
+          battery_info = _BatteryStatus(device, blacklist)
+          imei_slice = _IMEISlice(device)
+
+          if (device.product_name == 'mantaray' and
+              battery_info.get('AC powered', None) != 'true'):
+            logging.error('Mantaray device not connected to AC power.')
+
+          device_status.update({
+            'ro.build.product': build_product,
+            'ro.build.id': build_id,
+            'ro.build.fingerprint': build_fingerprint,
+            'battery': battery_info,
+            'imei_slice': imei_slice,
+            'wifi_ip': wifi_ip,
+
+            # TODO(jbudorick): Remove these once no clients depend on them.
+            'type': build_product,
+            'build': build_id,
+            'build_detail': build_fingerprint,
+          })
+
+        except device_errors.CommandFailedError:
+          logging.exception('Failure while getting device status for %s.',
+                            str(device))
+          if blacklist:
+            blacklist.Extend([serial], reason='status_check_failure')
+
+        except device_errors.CommandTimeoutError:
+          logging.exception('Timeout while getting device status for %s.',
+                            str(device))
+          if blacklist:
+            blacklist.Extend([serial], reason='status_check_timeout')
+
+      elif blacklist:
+        blacklist.Extend([serial],
+                         reason=adb_status if usb_status else 'offline')
+
+    device_status['blacklisted'] = _IsBlacklisted(serial, blacklist)
+
+    return device_status
+
+  parallel_devices = device_utils.DeviceUtils.parallel(devices)
+  statuses = parallel_devices.pMap(blacklisting_device_status).pGet(None)
+  return statuses
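+
+# For illustration, a healthy attached device produces an entry such as
+# (all values hypothetical):
+#   {'serial': '0123456789abcdef', 'adb_status': 'device', 'usb_status': True,
+#    'blacklisted': False, 'ro.build.product': 'hammerhead', ...}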
+
+
+def RecoverDevices(devices, blacklist):
+  """Attempts to recover any inoperable devices in the provided list.
+
+  Args:
+    devices: The list of devices to attempt to recover.
+    blacklist: The current device blacklist, which will be used and then
+      reset.
+  Returns:
+    Nothing.
+  """
+
+  statuses = DeviceStatus(devices, blacklist)
+
+  should_restart_usb = set(
+      status['serial'] for status in statuses
+      if (not status['usb_status']
+          or status['adb_status'] in ('offline', 'missing')))
+  should_restart_adb = should_restart_usb.union(set(
+      status['serial'] for status in statuses
+      if status['adb_status'] == 'unauthorized'))
+  should_reboot_device = should_restart_adb.union(set(
+      status['serial'] for status in statuses
+      if status['blacklisted']))
+
+  logging.debug('Should restart USB for:')
+  for d in should_restart_usb:
+    logging.debug('  %s', d)
+  logging.debug('Should restart ADB for:')
+  for d in should_restart_adb:
+    logging.debug('  %s', d)
+  logging.debug('Should reboot:')
+  for d in should_reboot_device:
+    logging.debug('  %s', d)
+
+  if blacklist:
+    blacklist.Reset()
+
+  if should_restart_adb:
+    KillAllAdb()
+  for serial in should_restart_usb:
+    try:
+      reset_usb.reset_android_usb(serial)
+    except IOError:
+      logging.exception('Unable to reset USB for %s.', serial)
+      if blacklist:
+        blacklist.Extend([serial], reason='usb_failure')
+    except device_errors.DeviceUnreachableError:
+      logging.exception('Unable to reset USB for %s.', serial)
+      if blacklist:
+        blacklist.Extend([serial], reason='offline')
+
+  def blacklisting_recovery(device):
+    if _IsBlacklisted(device.adb.GetDeviceSerial(), blacklist):
+      logging.debug('%s is blacklisted, skipping recovery.', str(device))
+      return
+
+    if str(device) in should_reboot_device:
+      try:
+        device.WaitUntilFullyBooted(retries=0)
+        return
+      except (device_errors.CommandTimeoutError,
+              device_errors.CommandFailedError):
+        logging.exception('Failure while waiting for %s. '
+                          'Attempting to recover.', str(device))
+
+      try:
+        try:
+          device.Reboot(block=False, timeout=5, retries=0)
+        except device_errors.CommandTimeoutError:
+          logging.warning('Timed out while attempting to reboot %s normally. '
+                          'Attempting alternative reboot.', str(device))
+          # The device drops offline before we can grab the exit code, so
+          # we don't check for status.
+          device.adb.Root()
+          device.adb.Shell('echo b > /proc/sysrq-trigger', expect_status=None,
+                           timeout=5, retries=0)
+      except device_errors.CommandFailedError:
+        logging.exception('Failed to reboot %s.', str(device))
+        if blacklist:
+          blacklist.Extend([device.adb.GetDeviceSerial()],
+                           reason='reboot_failure')
+      except device_errors.CommandTimeoutError:
+        logging.exception('Timed out while rebooting %s.', str(device))
+        if blacklist:
+          blacklist.Extend([device.adb.GetDeviceSerial()],
+                           reason='reboot_timeout')
+
+      try:
+        device.WaitUntilFullyBooted(retries=0)
+      except device_errors.CommandFailedError:
+        logging.exception('Failure while waiting for %s.', str(device))
+        if blacklist:
+          blacklist.Extend([device.adb.GetDeviceSerial()],
+                           reason='reboot_failure')
+      except device_errors.CommandTimeoutError:
+        logging.exception('Timed out while waiting for %s.', str(device))
+        if blacklist:
+          blacklist.Extend([device.adb.GetDeviceSerial()],
+                           reason='reboot_timeout')
+
+  device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_recovery)
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--out-dir',
+                      help='Directory where the device path is stored',
+                      default=os.path.join(host_paths.DIR_SOURCE_ROOT, 'out'))
+  parser.add_argument('--restart-usb', action='store_true',
+                      help='DEPRECATED. '
+                           'This script now always tries to reset USB.')
+  parser.add_argument('--json-output',
+                      help='Output JSON information into a specified file.')
+  parser.add_argument('--adb-path',
+                      help='Absolute path to the adb binary to use.')
+  parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
+  parser.add_argument('--known-devices-file', action='append', default=[],
+                      dest='known_devices_files',
+                      help='Path to known device lists.')
+  parser.add_argument('-v', '--verbose', action='count', default=1,
+                      help='Log more information.')
+
+  args = parser.parse_args()
+
+  run_tests_helper.SetLogLevel(args.verbose)
+
+  devil_custom_deps = None
+  if args.adb_path:
+    devil_custom_deps = {
+      'adb': {
+        devil_env.GetPlatform(): [args.adb_path],
+      },
+    }
+
+  devil_chromium.Initialize(custom_deps=devil_custom_deps)
+
+  blacklist = (device_blacklist.Blacklist(args.blacklist_file)
+               if args.blacklist_file
+               else None)
+
+  last_devices_path = os.path.join(
+      args.out_dir, device_list.LAST_DEVICES_FILENAME)
+  args.known_devices_files.append(last_devices_path)
+
+  expected_devices = set()
+  try:
+    for path in args.known_devices_files:
+      if os.path.exists(path):
+        expected_devices.update(device_list.GetPersistentDeviceList(path))
+  except IOError:
+    logging.warning('Problem reading %s, skipping.', path)
+
+  logging.info('Expected devices:')
+  for device in expected_devices:
+    logging.info('  %s', device)
+
+  usb_devices = set(lsusb.get_android_devices())
+  devices = [device_utils.DeviceUtils(s)
+             for s in expected_devices.union(usb_devices)]
+
+  RecoverDevices(devices, blacklist)
+  statuses = DeviceStatus(devices, blacklist)
+
+  # Log the state of all devices.
+  for status in statuses:
+    logging.info(status['serial'])
+    adb_status = status.get('adb_status')
+    blacklisted = status.get('blacklisted')
+    logging.info('  USB status: %s',
+                 'online' if status.get('usb_status') else 'offline')
+    logging.info('  ADB status: %s', adb_status)
+    logging.info('  Blacklisted: %s', str(blacklisted))
+    if adb_status == 'device' and not blacklisted:
+      logging.info('  Device type: %s', status.get('ro.build.product'))
+      logging.info('  OS build: %s', status.get('ro.build.id'))
+      logging.info('  OS build fingerprint: %s',
+                   status.get('ro.build.fingerprint'))
+      logging.info('  Battery state:')
+      for k, v in status.get('battery', {}).iteritems():
+        logging.info('    %s: %s', k, v)
+      logging.info('  IMEI slice: %s', status.get('imei_slice'))
+      logging.info('  WiFi IP: %s', status.get('wifi_ip'))
+
+  # Update the last devices file(s).
+  for path in args.known_devices_files:
+    device_list.WritePersistentDeviceList(
+        path, [status['serial'] for status in statuses])
+
+  # Write device info to file for buildbot info display.
+  if os.path.exists('/home/chrome-bot'):
+    with open('/home/chrome-bot/.adb_device_info', 'w') as f:
+      for status in statuses:
+        try:
+          if status['adb_status'] == 'device':
+            f.write('{serial} {adb_status} {build_product} {build_id} '
+                    '{temperature:.1f}C {level}%\n'.format(
+                serial=status['serial'],
+                adb_status=status['adb_status'],
+                build_product=status['type'],
+                build_id=status['build'],
+                temperature=float(status['battery']['temperature']) / 10,
+                level=status['battery']['level']
+            ))
+          elif status.get('usb_status', False):
+            f.write('{serial} {adb_status}\n'.format(
+                serial=status['serial'],
+                adb_status=status['adb_status']
+            ))
+          else:
+            f.write('{serial} offline\n'.format(
+                serial=status['serial']
+            ))
+        except Exception: # pylint: disable=broad-except
+          pass
+
+  # Dump the device statuses to JSON.
+  if args.json_output:
+    with open(args.json_output, 'wb') as f:
+      f.write(json.dumps(statuses, indent=4))
+
+  live_devices = [status['serial'] for status in statuses
+                  if (status['adb_status'] == 'device'
+                      and not _IsBlacklisted(status['serial'], blacklist))]
+
+  # If all devices failed, or if there are no devices, it's an infra error.
+  return 0 if live_devices else exit_codes.INFRA
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/buildbot/bb_device_steps.py b/build/android/buildbot/bb_device_steps.py
new file mode 100755
index 0000000..93b19e6
--- /dev/null
+++ b/build/android/buildbot/bb_device_steps.py
@@ -0,0 +1,783 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import glob
+import hashlib
+import json
+import os
+import random
+import re
+import shutil
+import sys
+
+import bb_utils
+import bb_annotations
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+import devil_chromium
+import provision_devices
+from devil.android import device_utils
+from pylib import constants
+from pylib.gtest import gtest_config
+
+CHROME_SRC_DIR = bb_utils.CHROME_SRC
+DIR_BUILD_ROOT = os.path.dirname(CHROME_SRC_DIR)
+CHROME_OUT_DIR = bb_utils.CHROME_OUT_DIR
+BLINK_SCRIPTS_DIR = 'third_party/WebKit/Tools/Scripts'
+
+SLAVE_SCRIPTS_DIR = os.path.join(bb_utils.BB_BUILD_DIR, 'scripts', 'slave')
+LOGCAT_DIR = os.path.join(bb_utils.CHROME_OUT_DIR, 'logcat')
+GS_URL = 'https://storage.googleapis.com'
+GS_AUTH_URL = 'https://storage.cloud.google.com'
+
+# Describes an instrumentation test suite:
+#   name: Name of the test suite we're running.
+#   apk: apk to be installed.
+#   apk_package: package for the apk to be installed.
+#   test_apk: apk to run tests on.
+#   test_data: data folder in format destination:source.
+#   isolate_file_path: Path to the suite's .isolate file, if any.
+#   host_driven_root: The host-driven test root directory.
+#   annotation: Annotation of the tests to include.
+#   exclude_annotation: The annotation of the tests to exclude.
+#   extra_flags: Extra command-line flags to pass to the test runner.
+I_TEST = collections.namedtuple('InstrumentationTest', [
+    'name', 'apk', 'apk_package', 'test_apk', 'test_data', 'isolate_file_path',
+    'host_driven_root', 'annotation', 'exclude_annotation', 'extra_flags'])
+
+
+def SrcPath(*path):
+  return os.path.join(CHROME_SRC_DIR, *path)
+
+
+def I(name, apk, apk_package, test_apk, test_data, isolate_file_path=None,
+      host_driven_root=None, annotation=None, exclude_annotation=None,
+      extra_flags=None):
+  return I_TEST(name, apk, apk_package, test_apk, test_data, isolate_file_path,
+                host_driven_root, annotation, exclude_annotation, extra_flags)
+
+INSTRUMENTATION_TESTS = dict((suite.name, suite) for suite in [
+    I('ContentShell',
+      'ContentShell.apk',
+      'org.chromium.content_shell_apk',
+      'ContentShellTest',
+      'content:content/test/data/android/device_files',
+      isolate_file_path='content/content_shell_test_data.isolate'),
+    I('ChromePublic',
+      'ChromePublic.apk',
+      'org.chromium.chrome',
+      'ChromePublicTest',
+      'chrome:chrome/test/data/android/device_files',
+      isolate_file_path='chrome/chrome_public_test_apk.isolate'),
+    I('AndroidWebView',
+      'AndroidWebView.apk',
+      'org.chromium.android_webview.shell',
+      'AndroidWebViewTest',
+      'webview:android_webview/test/data/device_files',
+      isolate_file_path='android_webview/android_webview_test_data.isolate'),
+    I('ChromeSyncShell',
+      'ChromeSyncShell.apk',
+      'org.chromium.chrome.browser.sync',
+      'ChromeSyncShellTest',
+      None),
+    ])
+
+InstallablePackage = collections.namedtuple('InstallablePackage', [
+    'name', 'apk', 'apk_package'])
+
+INSTALLABLE_PACKAGES = dict((package.name, package) for package in (
+    [InstallablePackage(i.name, i.apk, i.apk_package)
+     for i in INSTRUMENTATION_TESTS.itervalues()] +
+    [InstallablePackage('ChromeDriverWebViewShell',
+                        'ChromeDriverWebViewShell.apk',
+                        'org.chromium.chromedriver_webview_shell')]))
+
+VALID_TESTS = set([
+    'base_junit_tests',
+    'chromedriver',
+    'components_browsertests',
+    'gfx_unittests',
+    'gl_unittests',
+    'gpu',
+    'python_unittests',
+    'ui',
+    'unit',
+    'webkit',
+    'webkit_layout'
+])
+
+RunCmd = bb_utils.RunCmd
+
+
+def _GetRevision(options):
+  """Get the SVN revision number.
+
+  Args:
+    options: options object.
+
+  Returns:
+    The revision number.
+  """
+  revision = options.build_properties.get('got_revision')
+  if not revision:
+    revision = options.build_properties.get('revision', 'testing')
+  return revision
+
+
+def _RunTest(options, cmd, suite):
+  """Run test command with runtest.py.
+
+  Args:
+    options: options object.
+    cmd: the command to run.
+    suite: test name.
+  """
+  property_args = bb_utils.EncodeProperties(options)
+  args = [os.path.join(SLAVE_SCRIPTS_DIR, 'runtest.py')] + property_args
+  args += ['--test-platform', 'android']
+  if options.factory_properties.get('generate_gtest_json'):
+    args.append('--generate-json-file')
+    args += ['-o', 'gtest-results/%s' % suite,
+             '--annotate', 'gtest',
+             '--build-number', str(options.build_properties.get('buildnumber',
+                                                                '')),
+             '--builder-name', options.build_properties.get('buildername', '')]
+  if options.target == 'Release':
+    args += ['--target', 'Release']
+  else:
+    args += ['--target', 'Debug']
+  if options.flakiness_server:
+    args += ['--flakiness-dashboard-server=%s' %
+                options.flakiness_server]
+  args += cmd
+  RunCmd(args, cwd=DIR_BUILD_ROOT)
+
+
+def RunTestSuites(options, suites, suites_options=None):
+  """Manages an invocation of test_runner.py for gtests.
+
+  Args:
+    options: options object.
+    suites: List of suite names to run.
+    suites_options: Command line options dictionary for particular suites.
+                    For example,
+                    {'content_browsertests': ['--num_retries=1', '--release']}
+                    will add the options only to content_browsertests.
+  """
+
+  if not suites_options:
+    suites_options = {}
+
+  args = ['--verbose', '--blacklist-file', 'out/bad_devices.json']
+  if options.target == 'Release':
+    args.append('--release')
+  if options.asan:
+    args.append('--tool=asan')
+  if options.gtest_filter:
+    args.append('--gtest-filter=%s' % options.gtest_filter)
+
+  for suite in suites:
+    bb_annotations.PrintNamedStep(suite)
+    cmd = [suite] + args
+    cmd += suites_options.get(suite, [])
+    if suite == 'content_browsertests' or suite == 'components_browsertests':
+      cmd.append('--num_retries=1')
+    _RunTest(options, cmd, suite)
+
+
+def RunJunitSuite(suite):
+  bb_annotations.PrintNamedStep(suite)
+  RunCmd(['build/android/test_runner.py', 'junit', '-s', suite])
+
+
+def RunChromeDriverTests(options):
+  """Run all the steps for running chromedriver tests."""
+  bb_annotations.PrintNamedStep('chromedriver_annotation')
+  RunCmd(['chrome/test/chromedriver/run_buildbot_steps.py',
+          '--android-packages=%s,%s,%s,%s' %
+          ('chromium',
+           'chrome_stable',
+           'chrome_beta',
+           'chromedriver_webview_shell'),
+          '--revision=%s' % _GetRevision(options),
+          '--update-log'])
+
+
+def InstallApk(options, test, print_step=False):
+  """Install an apk to all phones.
+
+  Args:
+    options: options object
+    test: An I_TEST namedtuple
+    print_step: Print a buildbot step
+  """
+  if print_step:
+    bb_annotations.PrintNamedStep('install_%s' % test.name.lower())
+
+  args = [
+      '--apk_package', test.apk_package,
+      '--blacklist-file', 'out/bad_devices.json',
+  ]
+  if options.target == 'Release':
+    args.append('--release')
+  args.append(test.apk)
+
+  RunCmd(['build/android/adb_install_apk.py'] + args, halt_on_failure=True)
+
+
+def RunInstrumentationSuite(options, test, flunk_on_failure=True,
+                            python_only=False, official_build=False):
+  """Manages an invocation of test_runner.py for instrumentation tests.
+
+  Args:
+    options: options object
+    test: An I_TEST namedtuple
+    flunk_on_failure: Flunk the step if tests fail.
+    python_only: Run only host-driven Python tests.
+    official_build: Run official-build tests.
+  """
+  bb_annotations.PrintNamedStep('%s_instrumentation_tests' % test.name.lower())
+
+  if test.apk:
+    InstallApk(options, test)
+  args = [
+      '--test-apk', test.test_apk, '--verbose',
+      '--blacklist-file', 'out/bad_devices.json'
+  ]
+  if test.test_data:
+    args.extend(['--test_data', test.test_data])
+  if options.target == 'Release':
+    args.append('--release')
+  if options.asan:
+    args.append('--tool=asan')
+  if options.flakiness_server:
+    args.append('--flakiness-dashboard-server=%s' %
+                options.flakiness_server)
+  if options.coverage_bucket:
+    args.append('--coverage-dir=%s' % options.coverage_dir)
+  if test.isolate_file_path:
+    args.append('--isolate-file-path=%s' % test.isolate_file_path)
+  if test.host_driven_root:
+    args.append('--host-driven-root=%s' % test.host_driven_root)
+  if test.annotation:
+    args.extend(['-A', test.annotation])
+  if test.exclude_annotation:
+    args.extend(['-E', test.exclude_annotation])
+  if test.extra_flags:
+    args.extend(test.extra_flags)
+  if python_only:
+    args.append('-p')
+  if official_build:
+    # The flag takes no value; its mere presence tells the test runner to
+    # treat the option as True.
+    args.append('--official-build')
+
+  RunCmd(['build/android/test_runner.py', 'instrumentation'] + args,
+         flunk_on_failure=flunk_on_failure)
+
+
+def RunWebkitLint():
+  """Lint WebKit's TestExpectation files."""
+  bb_annotations.PrintNamedStep('webkit_lint')
+  RunCmd([SrcPath(os.path.join(BLINK_SCRIPTS_DIR, 'lint-test-expectations'))])
+
+
+def RunWebkitLayoutTests(options):
+  """Run layout tests on an actual device."""
+  bb_annotations.PrintNamedStep('webkit_tests')
+  cmd_args = [
+      '--no-show-results',
+      '--no-new-test-results',
+      '--full-results-html',
+      '--clobber-old-results',
+      '--exit-after-n-failures', '5000',
+      '--exit-after-n-crashes-or-timeouts', '100',
+      '--debug-rwt-logging',
+      '--results-directory', '../layout-test-results',
+      '--target', options.target,
+      '--builder-name', options.build_properties.get('buildername', ''),
+      '--build-number', str(options.build_properties.get('buildnumber', '')),
+      '--master-name', 'ChromiumWebkit',  # TODO: Get this from the cfg.
+      '--build-name', options.build_properties.get('buildername', ''),
+      '--platform=android']
+
+  for flag in 'test_results_server', 'driver_name', 'additional_driver_flag':
+    if flag in options.factory_properties:
+      cmd_args.extend(['--%s' % flag.replace('_', '-'),
+                       options.factory_properties.get(flag)])
+
+  for f in options.factory_properties.get('additional_expectations', []):
+    cmd_args.extend(
+        ['--additional-expectations=%s' % os.path.join(CHROME_SRC_DIR, *f)])
+
+  # TODO(dpranke): Remove this block after
+  # https://codereview.chromium.org/12927002/ lands.
+  for f in options.factory_properties.get('additional_expectations_files', []):
+    cmd_args.extend(
+        ['--additional-expectations=%s' % os.path.join(CHROME_SRC_DIR, *f)])
+
+  exit_code = RunCmd(
+      [SrcPath(os.path.join(BLINK_SCRIPTS_DIR, 'run-webkit-tests'))] + cmd_args)
+  if exit_code == 255: # test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
+    bb_annotations.PrintMsg('?? (crashed or hung)')
+  elif exit_code == 254: # test_run_results.NO_DEVICES_EXIT_STATUS
+    bb_annotations.PrintMsg('?? (no devices found)')
+  elif exit_code == 253: # test_run_results.NO_TESTS_EXIT_STATUS
+    bb_annotations.PrintMsg('?? (no tests found)')
+  else:
+    full_results_path = os.path.join('..', 'layout-test-results',
+                                     'full_results.json')
+    if os.path.exists(full_results_path):
+      full_results = json.load(open(full_results_path))
+      unexpected_passes, unexpected_failures, unexpected_flakes = (
+          _ParseLayoutTestResults(full_results))
+      if unexpected_failures:
+        _PrintDashboardLink('failed', unexpected_failures.keys(),
+                            max_tests=25)
+      elif unexpected_passes:
+        _PrintDashboardLink('unexpected passes', unexpected_passes.keys(),
+                            max_tests=10)
+      if unexpected_flakes:
+        _PrintDashboardLink('unexpected flakes', unexpected_flakes.keys(),
+                            max_tests=10)
+
+      if exit_code == 0 and (unexpected_passes or unexpected_flakes):
+        # If exit_code != 0, RunCmd() will have already printed an error.
+        bb_annotations.PrintWarning()
+    else:
+      bb_annotations.PrintError()
+      bb_annotations.PrintMsg('?? (results missing)')
+
+  if options.factory_properties.get('archive_webkit_results', False):
+    bb_annotations.PrintNamedStep('archive_webkit_results')
+    base = 'https://storage.googleapis.com/chromium-layout-test-archives'
+    builder_name = options.build_properties.get('buildername', '')
+    build_number = str(options.build_properties.get('buildnumber', ''))
+    results_link = '%s/%s/%s/layout-test-results/results.html' % (
+        base, EscapeBuilderName(builder_name), build_number)
+    bb_annotations.PrintLink('results', results_link)
+    bb_annotations.PrintLink('(zip)', '%s/%s/%s/layout-test-results.zip' % (
+        base, EscapeBuilderName(builder_name), build_number))
+    gs_bucket = 'gs://chromium-layout-test-archives'
+    RunCmd([os.path.join(SLAVE_SCRIPTS_DIR, 'chromium',
+                         'archive_layout_test_results.py'),
+            '--results-dir', '../../layout-test-results',
+            '--build-number', build_number,
+            '--builder-name', builder_name,
+            '--gs-bucket', gs_bucket],
+            cwd=DIR_BUILD_ROOT)
+
+
+def _ParseLayoutTestResults(results):
+  """Extract the failures from the test run."""
+  # Cloned from third_party/WebKit/Tools/Scripts/print-json-test-results
+  tests = _ConvertTrieToFlatPaths(results['tests'])
+  failures = {}
+  flakes = {}
+  passes = {}
+  for (test, result) in tests.iteritems():
+    if result.get('is_unexpected'):
+      actual_results = result['actual'].split()
+      expected_results = result['expected'].split()
+      if len(actual_results) > 1:
+        # We report the first failure type back, even if the second
+        # was more severe.
+        if actual_results[1] in expected_results:
+          flakes[test] = actual_results[0]
+        else:
+          failures[test] = actual_results[0]
+      elif actual_results[0] == 'PASS':
+        passes[test] = result
+      else:
+        failures[test] = actual_results[0]
+
+  return (passes, failures, flakes)
+
+
+def _ConvertTrieToFlatPaths(trie, prefix=None):
+  """Flatten the trie of failures into a list."""
+  # Cloned from third_party/WebKit/Tools/Scripts/print-json-test-results
+  result = {}
+  for name, data in trie.iteritems():
+    if prefix:
+      name = prefix + '/' + name
+
+    if len(data) and 'actual' not in data and 'expected' not in data:
+      result.update(_ConvertTrieToFlatPaths(data, name))
+    else:
+      result[name] = data
+
+  return result
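+
+# Illustration (hypothetical results trie):
+#   {'fast': {'css': {'a.html': {'actual': 'PASS', 'expected': 'FAIL'}}}}
+# flattens to {'fast/css/a.html': {'actual': 'PASS', 'expected': 'FAIL'}},
+# keyed by the full test path.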
+
+
+def _PrintDashboardLink(link_text, tests, max_tests):
+  """Add a link to the flakiness dashboard in the step annotations."""
+  if len(tests) > max_tests:
+    test_list_text = ' '.join(tests[:max_tests]) + ' and more'
+  else:
+    test_list_text = ' '.join(tests)
+
+  dashboard_base = ('http://test-results.appspot.com'
+                    '/dashboards/flakiness_dashboard.html#'
+                    'master=ChromiumWebkit&tests=')
+
+  bb_annotations.PrintLink('%d %s: %s' %
+                           (len(tests), link_text, test_list_text),
+                           dashboard_base + ','.join(tests))
+
+
+def EscapeBuilderName(builder_name):
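+  # e.g. 'Android Tests (dbg)' -> 'Android_Tests__dbg_'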
+  return re.sub('[ ()]', '_', builder_name)
+
+
+def SpawnLogcatMonitor():
+  shutil.rmtree(LOGCAT_DIR, ignore_errors=True)
+  bb_utils.SpawnCmd([
+      os.path.join(CHROME_SRC_DIR, 'build', 'android', 'adb_logcat_monitor.py'),
+      LOGCAT_DIR])
+
+  # Wait for logcat_monitor to pull existing logcat
+  RunCmd(['sleep', '5'])
+
+
+def ProvisionDevices(options):
+  bb_annotations.PrintNamedStep('provision_devices')
+
+  if not bb_utils.TESTING:
+    # Restart adb to work around bugs, sleep to wait for usb discovery.
+    device_utils.RestartServer()
+    RunCmd(['sleep', '1'])
+  provision_cmd = [
+      'build/android/provision_devices.py', '-t', options.target,
+      '--blacklist-file', 'out/bad_devices.json'
+  ]
+  if options.auto_reconnect:
+    provision_cmd.append('--auto-reconnect')
+  if options.skip_wipe:
+    provision_cmd.append('--skip-wipe')
+  if options.disable_location:
+    provision_cmd.append('--disable-location')
+  RunCmd(provision_cmd, halt_on_failure=True)
+
+
+def DeviceStatusCheck(options):
+  bb_annotations.PrintNamedStep('device_status_check')
+  cmd = [
+      'build/android/buildbot/bb_device_status_check.py',
+      '--blacklist-file', 'out/bad_devices.json',
+  ]
+  if options.restart_usb:
+    cmd.append('--restart-usb')
+  RunCmd(cmd, halt_on_failure=True)
+
+
+def GetDeviceSetupStepCmds():
+  return [
+      ('device_status_check', DeviceStatusCheck),
+      ('provision_devices', ProvisionDevices),
+  ]
+
+
+def RunUnitTests(options):
+  suites = gtest_config.STABLE_TEST_SUITES
+  if options.asan:
+    suites = [s for s in suites
+              if s not in gtest_config.ASAN_EXCLUDED_TEST_SUITES]
+  RunTestSuites(options, suites)
+
+
+def RunInstrumentationTests(options):
+  for test in INSTRUMENTATION_TESTS.itervalues():
+    RunInstrumentationSuite(options, test)
+
+
+def RunWebkitTests(options):
+  RunTestSuites(options, ['webkit_unit_tests', 'blink_heap_unittests'])
+  RunWebkitLint()
+
+
+def RunGPUTests(options):
+  exit_code = 0
+  revision = _GetRevision(options)
+  builder_name = options.build_properties.get('buildername', 'noname')
+
+  bb_annotations.PrintNamedStep('pixel_tests')
+  exit_code = RunCmd(['content/test/gpu/run_gpu_test.py',
+                      'pixel', '-v',
+                      '--browser',
+                      'android-content-shell',
+                      '--build-revision',
+                      str(revision),
+                      '--upload-refimg-to-cloud-storage',
+                      '--refimg-cloud-storage-bucket',
+                      'chromium-gpu-archive/reference-images',
+                      '--os-type',
+                      'android',
+                      '--test-machine-name',
+                      EscapeBuilderName(builder_name),
+                      '--android-blacklist-file',
+                      'out/bad_devices.json']) or exit_code
+
+  bb_annotations.PrintNamedStep('webgl_conformance_tests')
+  exit_code = RunCmd(['content/test/gpu/run_gpu_test.py', '-v',
+                      '--browser=android-content-shell', 'webgl_conformance',
+                      '--webgl-conformance-version=1.0.1',
+                      '--android-blacklist-file',
+                      'out/bad_devices.json']) or exit_code
+
+  bb_annotations.PrintNamedStep('android_webview_webgl_conformance_tests')
+  exit_code = RunCmd(['content/test/gpu/run_gpu_test.py', '-v',
+                      '--browser=android-webview-shell', 'webgl_conformance',
+                      '--webgl-conformance-version=1.0.1',
+                      '--android-blacklist-file',
+                      'out/bad_devices.json']) or exit_code
+
+  bb_annotations.PrintNamedStep('gpu_rasterization_tests')
+  exit_code = RunCmd(['content/test/gpu/run_gpu_test.py',
+                      'gpu_rasterization', '-v',
+                      '--browser',
+                      'android-content-shell',
+                      '--build-revision',
+                      str(revision),
+                      '--test-machine-name',
+                      EscapeBuilderName(builder_name),
+                      '--android-blacklist-file',
+                      'out/bad_devices.json']) or exit_code
+
+  return exit_code
+
+
+def RunPythonUnitTests(_options):
+  for suite in constants.PYTHON_UNIT_TEST_SUITES:
+    bb_annotations.PrintNamedStep(suite)
+    RunCmd(['build/android/test_runner.py', 'python', '-s', suite])
+
+
+def GetTestStepCmds():
+  return [
+      ('base_junit_tests',
+          lambda _options: RunJunitSuite('base_junit_tests')),
+      ('chromedriver', RunChromeDriverTests),
+      ('components_browsertests',
+          lambda options: RunTestSuites(options, ['components_browsertests'])),
+      ('gfx_unittests',
+          lambda options: RunTestSuites(options, ['gfx_unittests'])),
+      ('gl_unittests',
+          lambda options: RunTestSuites(options, ['gl_unittests'])),
+      ('gpu', RunGPUTests),
+      ('python_unittests', RunPythonUnitTests),
+      ('ui', RunInstrumentationTests),
+      ('unit', RunUnitTests),
+      ('webkit', RunWebkitTests),
+      ('webkit_layout', RunWebkitLayoutTests),
+  ]
+
+
+def MakeGSPath(options, gs_base_dir):
+  revision = _GetRevision(options)
+  bot_id = options.build_properties.get('buildername', 'testing')
+  randhash = hashlib.sha1(str(random.random())).hexdigest()
+  gs_path = '%s/%s/%s/%s' % (gs_base_dir, bot_id, revision, randhash)
+  # remove double slashes, happens with blank revisions and confuses gsutil
+  gs_path = re.sub('/+', '/', gs_path)
+  return gs_path
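+
+# For example (hypothetical values): gs_base_dir 'chromium-code-coverage/java',
+# a builder named 'android-tester' and revision 123456 yield a path like
+#   chromium-code-coverage/java/android-tester/123456/<random sha1>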
+
+def UploadHTML(options, gs_base_dir, dir_to_upload, link_text,
+               link_rel_path='index.html', gs_url=GS_URL):
+  """Uploads directory at |dir_to_upload| to Google Storage and output a link.
+
+  Args:
+    options: Command line options.
+    gs_base_dir: The Google Storage base directory (e.g.
+      'chromium-code-coverage/java')
+    dir_to_upload: Absolute path to the directory to be uploaded.
+    link_text: Link text to be displayed on the step.
+    link_rel_path: Link path relative to |dir_to_upload|.
+    gs_url: Google storage URL.
+  """
+  gs_path = MakeGSPath(options, gs_base_dir)
+  RunCmd([bb_utils.GSUTIL_PATH, 'cp', '-R', dir_to_upload, 'gs://%s' % gs_path])
+  bb_annotations.PrintLink(link_text,
+                           '%s/%s/%s' % (gs_url, gs_path, link_rel_path))
+
+
+def GenerateJavaCoverageReport(options):
+  """Generates an HTML coverage report using EMMA and uploads it."""
+  bb_annotations.PrintNamedStep('java_coverage_report')
+
+  coverage_html = os.path.join(options.coverage_dir, 'coverage_html')
+  RunCmd(['build/android/generate_emma_html.py',
+          '--coverage-dir', options.coverage_dir,
+          '--metadata-dir', os.path.join(CHROME_OUT_DIR, options.target),
+          '--cleanup',
+          '--output', os.path.join(coverage_html, 'index.html')])
+  return coverage_html
+
+
+def LogcatDump(options):
+  # Print logcat, kill logcat monitor
+  bb_annotations.PrintNamedStep('logcat_dump')
+  logcat_file = os.path.join(CHROME_OUT_DIR, options.target, 'full_log.txt')
+  RunCmd([SrcPath('build', 'android', 'adb_logcat_printer.py'),
+          '--output-path', logcat_file, LOGCAT_DIR])
+  gs_path = MakeGSPath(options, 'chromium-android/logcat_dumps')
+  RunCmd([bb_utils.GSUTIL_PATH, 'cp', '-z', 'txt', logcat_file,
+          'gs://%s' % gs_path])
+  bb_annotations.PrintLink('logcat dump', '%s/%s' % (GS_AUTH_URL, gs_path))
+
+
+def RunStackToolSteps(options):
+  """Run stack tool steps.
+
+  Stack tool is run for logcat dump, optionally for ASAN.
+  """
+  bb_annotations.PrintNamedStep('Run stack tool with logcat dump')
+  build_dir = os.path.join(CHROME_OUT_DIR, options.target)
+  logcat_file = os.path.join(build_dir, 'full_log.txt')
+  RunCmd([os.path.join(CHROME_SRC_DIR, 'third_party', 'android_platform',
+          'development', 'scripts', 'stack'),
+          '--output-directory', build_dir,
+          '--more-info', logcat_file])
+  if options.asan_symbolize:
+    bb_annotations.PrintNamedStep('Run stack tool for ASAN')
+    RunCmd([
+        os.path.join(CHROME_SRC_DIR, 'build', 'android', 'asan_symbolize.py'),
+        '--output-directory', build_dir,
+        '-l', logcat_file])
+
+
+def GenerateTestReport(options):
+  bb_annotations.PrintNamedStep('test_report')
+  for report in glob.glob(
+      os.path.join(CHROME_OUT_DIR, options.target, 'test_logs', '*.log')):
+    RunCmd(['cat', report])
+    os.remove(report)
+
+
+def MainTestWrapper(options):
+  exit_code = 0
+  try:
+    # Spawn logcat monitor
+    SpawnLogcatMonitor()
+
+    # Run all device setup steps
+    for _, cmd in GetDeviceSetupStepCmds():
+      cmd(options)
+
+    if options.install:
+      for i in options.install:
+        install_obj = INSTALLABLE_PACKAGES[i]
+        InstallApk(options, install_obj, print_step=True)
+
+    if options.test_filter:
+      exit_code = bb_utils.RunSteps(
+          options.test_filter, GetTestStepCmds(), options) or exit_code
+
+    if options.coverage_bucket:
+      coverage_html = GenerateJavaCoverageReport(options)
+      UploadHTML(options, '%s/java' % options.coverage_bucket, coverage_html,
+                 'Coverage Report')
+      shutil.rmtree(coverage_html, ignore_errors=True)
+
+    if options.experimental:
+      exit_code = RunTestSuites(
+          options, gtest_config.EXPERIMENTAL_TEST_SUITES) or exit_code
+
+    return exit_code
+
+  finally:
+    # Run all post test steps
+    LogcatDump(options)
+    if not options.disable_stack_tool:
+      RunStackToolSteps(options)
+    GenerateTestReport(options)
+    # KillHostHeartbeat() has logic to check if heartbeat process is running,
+    # and kills only if it finds the process is running on the host.
+    provision_devices.KillHostHeartbeat()
+    if options.cleanup:
+      shutil.rmtree(os.path.join(CHROME_OUT_DIR, options.target),
+          ignore_errors=True)
+
+
+def GetDeviceStepsOptParser():
+  parser = bb_utils.GetParser()
+  parser.add_option('--experimental', action='store_true',
+                    help='Run experimental tests')
+  parser.add_option('-f', '--test-filter', metavar='<filter>', default=[],
+                    action='append',
+                    help=('Run a test suite. Test suites: "%s"' %
+                          '", "'.join(VALID_TESTS)))
+  parser.add_option('--gtest-filter',
+                    help='Filter for running a subset of tests of a gtest test')
+  parser.add_option('--asan', action='store_true', help='Run tests with asan.')
+  parser.add_option('--install', metavar='<apk name>', action="append",
+                    help='Install an apk by name')
+  parser.add_option('--no-reboot', action='store_true',
+                    help='Do not reboot devices during provisioning.')
+  parser.add_option('--coverage-bucket',
+                    help=('Bucket name to store coverage results. Coverage is '
+                          'only run if this is set.'))
+  parser.add_option('--restart-usb', action='store_true',
+                    help='Restart USB ports before device status check.')
+  parser.add_option(
+      '--flakiness-server',
+      help=('The flakiness dashboard server to which the results should be '
+            'uploaded.'))
+  parser.add_option(
+      '--auto-reconnect', action='store_true',
+      help='Push script to device which restarts adbd on disconnections.')
+  parser.add_option('--skip-wipe', action='store_true',
+                    help='Do not wipe devices during provisioning.')
+  parser.add_option('--disable-location', action='store_true',
+                    help='Disable location settings.')
+  parser.add_option(
+      '--logcat-dump-output',
+      help='The logcat dump output will be "tee"-ed into this file')
+  # While processing perf bisects, a separate working directory is created,
+  # under which builds are produced. Therefore we should look for the relevant
+  # output files under this directory
+  # (/b/build/slave/<slave_name>/build/bisect/src/out).
+  parser.add_option(
+      '--chrome-output-dir',
+      help='Chrome output directory to be used while bisecting.')
+
+  parser.add_option('--disable-stack-tool', action='store_true',
+      help='Do not run stack tool.')
+  parser.add_option('--asan-symbolize', action='store_true',
+      help='Run stack tool for ASAN')
+  parser.add_option('--cleanup', action='store_true',
+      help='Delete out/<target> directory at the end of the run.')
+  return parser
+
+
+def main(argv):
+  parser = GetDeviceStepsOptParser()
+  options, args = parser.parse_args(argv[1:])
+
+  devil_chromium.Initialize()
+
+  if args:
+    return sys.exit('Unused args %s' % args)
+
+  unknown_tests = set(options.test_filter) - VALID_TESTS
+  if unknown_tests:
+    return sys.exit('Unknown tests %s' % list(unknown_tests))
+
+  setattr(options, 'target', options.factory_properties.get('target', 'Debug'))
+
+  # pylint: disable=global-statement
+  if options.chrome_output_dir:
+    global CHROME_OUT_DIR
+    global LOGCAT_DIR
+    CHROME_OUT_DIR = options.chrome_output_dir
+    LOGCAT_DIR = os.path.join(CHROME_OUT_DIR, 'logcat')
+
+  if options.coverage_bucket:
+    setattr(options, 'coverage_dir',
+            os.path.join(CHROME_OUT_DIR, options.target, 'coverage'))
+
+  return MainTestWrapper(options)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/build/android/buildbot/bb_host_steps.py b/build/android/buildbot/bb_host_steps.py
new file mode 100755
index 0000000..04a0e38
--- /dev/null
+++ b/build/android/buildbot/bb_host_steps.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import json
+import sys
+
+import bb_utils
+import bb_annotations
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+from pylib.constants import host_paths
+
+
+SLAVE_SCRIPTS_DIR = os.path.join(bb_utils.BB_BUILD_DIR, 'scripts', 'slave')
+VALID_HOST_TESTS = set(['check_webview_licenses'])
+
+DIR_BUILD_ROOT = os.path.dirname(host_paths.DIR_SOURCE_ROOT)
+
+# Shorthand for RunCmd, which is used extensively in this file.
+RunCmd = bb_utils.RunCmd
+
+
+def SrcPath(*path):
+  return os.path.join(host_paths.DIR_SOURCE_ROOT, *path)
+
+
+def CheckWebViewLicenses(_):
+  bb_annotations.PrintNamedStep('check_licenses')
+  RunCmd([SrcPath('android_webview', 'tools', 'webview_licenses.py'), 'scan'],
+         warning_code=1)
+
+
+def RunHooks(build_type):
+  RunCmd([SrcPath('build', 'landmines.py')])
+  build_path = SrcPath('out', build_type)
+  landmine_path = os.path.join(build_path, '.landmines_triggered')
+  clobber_env = os.environ.get('BUILDBOT_CLOBBER')
+  if clobber_env or os.path.isfile(landmine_path):
+    bb_annotations.PrintNamedStep('Clobber')
+    if not clobber_env:
+      print 'Clobbering due to triggered landmines:'
+      with open(landmine_path) as f:
+        print f.read()
+    RunCmd(['rm', '-rf', build_path])
+
+  bb_annotations.PrintNamedStep('runhooks')
+  RunCmd(['gclient', 'runhooks'], halt_on_failure=True)
+
+
+def GenerateBuildFiles(options):
+  cmd = [SrcPath('tools', 'mb', 'mb.py'),
+         'gen',
+         '-m', options.build_properties['mastername'],
+         '-b', options.build_properties['buildername'],
+         '--goma-dir', bb_utils.GOMA_DIR,
+         '//out/%s' % options.target]
+  bb_annotations.PrintNamedStep('generate_build_files')
+  RunCmd(cmd, halt_on_failure=True)
+
+
+def Compile(options):
+  if options.run_mb:
+    os.environ['GYP_CHROMIUM_NO_ACTION'] = '1'
+    RunHooks(options.target)
+    GenerateBuildFiles(options)
+  else:
+    RunHooks(options.target)
+
+  cmd = [os.path.join(SLAVE_SCRIPTS_DIR, 'compile.py'),
+         '--build-tool=ninja',
+         '--compiler=goma',
+         '--target=%s' % options.target,
+         '--goma-dir=%s' % bb_utils.GOMA_DIR]
+  bb_annotations.PrintNamedStep('compile')
+  if options.build_targets:
+    build_targets = options.build_targets.split(',')
+    cmd += ['--build-args', ' '.join(build_targets)]
+  RunCmd(cmd, halt_on_failure=True, cwd=DIR_BUILD_ROOT)
+
+
+def ZipBuild(options):
+  bb_annotations.PrintNamedStep('zip_build')
+  RunCmd([
+      os.path.join(SLAVE_SCRIPTS_DIR, 'zip_build.py'),
+      '--src-dir', host_paths.DIR_SOURCE_ROOT,
+      '--exclude-files', 'lib.target,gen,android_webview,jingle_unittests']
+      + bb_utils.EncodeProperties(options), cwd=DIR_BUILD_ROOT)
+
+
+def ExtractBuild(options):
+  bb_annotations.PrintNamedStep('extract_build')
+  RunCmd([os.path.join(SLAVE_SCRIPTS_DIR, 'extract_build.py')]
+         + bb_utils.EncodeProperties(options), cwd=DIR_BUILD_ROOT)
+
+
+def BisectPerfRegression(options):
+  args = []
+  if options.extra_src:
+    args = ['--extra_src', options.extra_src]
+  RunCmd([SrcPath('tools', 'prepare-bisect-perf-regression.py'),
+          '-w', os.path.join(host_paths.DIR_SOURCE_ROOT, os.pardir)])
+  RunCmd([SrcPath('tools', 'run-bisect-perf-regression.py'),
+          '-w', os.path.join(host_paths.DIR_SOURCE_ROOT, os.pardir),
+          '--build-properties=%s' % json.dumps(options.build_properties)] +
+          args)
+
+
+def GetHostStepCmds():
+  return [
+      ('compile', Compile),
+      ('extract_build', ExtractBuild),
+      ('check_webview_licenses', CheckWebViewLicenses),
+      ('bisect_perf_regression', BisectPerfRegression),
+      ('zip_build', ZipBuild)
+  ]
+
+
+def GetHostStepsOptParser():
+  parser = bb_utils.GetParser()
+  parser.add_option('--steps', help='Comma separated list of host tests.')
+  parser.add_option('--build-targets', default='',
+                    help='Comma separated list of build targets.')
+  parser.add_option('--experimental', action='store_true',
+                    help='Indicate whether to compile experimental targets.')
+  parser.add_option('--extra_src', default='',
+                    help='Path to extra source file. If this is supplied, '
+                    'bisect script will use it to override default behavior.')
+  parser.add_option('--run-mb', action='store_true',
+                    help='Use mb to generate build files.')
+
+  return parser
+
+
+def main(argv):
+  parser = GetHostStepsOptParser()
+  options, args = parser.parse_args(argv[1:])
+  if args:
+    sys.exit('Unused args: %s' % args)
+
+  setattr(options, 'target', options.factory_properties.get('target', 'Debug'))
+  setattr(options, 'extra_src',
+          options.factory_properties.get('extra_src', ''))
+
+  if options.steps:
+    bb_utils.RunSteps(options.steps.split(','), GetHostStepCmds(), options)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/build/android/buildbot/bb_run_bot.py b/build/android/buildbot/bb_run_bot.py
new file mode 100755
index 0000000..4cdb572
--- /dev/null
+++ b/build/android/buildbot/bb_run_bot.py
@@ -0,0 +1,316 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import copy
+import json
+import os
+import pipes
+import re
+import subprocess
+import sys
+
+import bb_utils
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+from pylib import constants
+
+
+CHROMIUM_COVERAGE_BUCKET = 'chromium-code-coverage'
+
+_BotConfig = collections.namedtuple(
+    'BotConfig', ['bot_id', 'host_obj', 'test_obj'])
+
+HostConfig = collections.namedtuple(
+    'HostConfig',
+    ['script', 'host_steps', 'extra_args', 'extra_gyp_defines', 'target_arch'])
+
+TestConfig = collections.namedtuple('Tests', ['script', 'tests', 'extra_args'])
+
+
+def BotConfig(bot_id, host_object, test_object=None):
+  return _BotConfig(bot_id, host_object, test_object)
+
+
+def DictDiff(d1, d2):
+  """Returns a diff-style listing of keys whose values differ between dicts."""
+  diff = []
+  for key in sorted(set(d1.keys() + d2.keys())):
+    if key in d1 and d1[key] != d2.get(key):
+      diff.append('- %s=%s' % (key, pipes.quote(d1[key])))
+    if key in d2 and d2[key] != d1.get(key):
+      diff.append('+ %s=%s' % (key, pipes.quote(d2[key])))
+  return '\n'.join(diff)
+
+
+def GetEnvironment(host_obj, testing, extra_env_vars=None):
+  init_env = dict(os.environ)
+  init_env['GYP_GENERATORS'] = 'ninja'
+  if extra_env_vars:
+    init_env.update(extra_env_vars)
+  envsetup_cmd = '. build/android/envsetup.sh'
+  if testing:
+    # Skip envsetup to avoid presubmit dependence on android deps.
+    print 'Testing mode - skipping "%s"' % envsetup_cmd
+    envsetup_cmd = ':'
+  else:
+    print 'Running %s' % envsetup_cmd
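+  # Run envsetup in a subshell (its output goes to stderr) and capture the
+  # resulting environment as JSON on stdout via env_to_json.py.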
+  proc = subprocess.Popen(['bash', '-exc',
+    envsetup_cmd + ' >&2; python build/android/buildbot/env_to_json.py'],
+    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+    cwd=bb_utils.CHROME_SRC, env=init_env)
+  json_env, envsetup_output = proc.communicate()
+  if proc.returncode != 0:
+    print >> sys.stderr, 'FATAL Failure in envsetup.'
+    print >> sys.stderr, envsetup_output
+    sys.exit(1)
+  env = json.loads(json_env)
+  env['GYP_DEFINES'] = env.get('GYP_DEFINES', '') + \
+      ' OS=android fastbuild=1 use_goma=1 gomadir=%s' % bb_utils.GOMA_DIR
+  if host_obj.target_arch:
+    env['GYP_DEFINES'] += ' target_arch=%s' % host_obj.target_arch
+  extra_gyp = host_obj.extra_gyp_defines
+  if extra_gyp:
+    env['GYP_DEFINES'] += ' %s' % extra_gyp
+    if re.search('(asan|clang)=1', extra_gyp):
+      env.pop('CXX_target', None)
+
+  # Bots check out chrome in /b/build/slave/<name>/build/src.
+  build_internal_android = os.path.abspath(os.path.join(
+      bb_utils.CHROME_SRC, '..', '..', '..', '..', '..', 'build_internal',
+      'scripts', 'slave', 'android'))
+  if os.path.exists(build_internal_android):
+    env['PATH'] = os.pathsep.join([build_internal_android, env['PATH']])
+  return env
+
+
+def GetCommands(options, bot_config):
+  """Get a formatted list of commands.
+
+  Args:
+    options: Options object.
+    bot_config: A BotConfig named tuple.
+    host_step_script: Host step script.
+    device_step_script: Device step script.
+  Returns:
+    list of Command objects.
+  """
+  property_args = bb_utils.EncodeProperties(options)
+  commands = [[bot_config.host_obj.script,
+               '--steps=%s' % ','.join(bot_config.host_obj.host_steps)] +
+              property_args + (bot_config.host_obj.extra_args or [])]
+
+  test_obj = bot_config.test_obj
+  if test_obj:
+    run_test_cmd = [test_obj.script] + property_args
+    for test in test_obj.tests:
+      run_test_cmd.extend(['-f', test])
+    if test_obj.extra_args:
+      run_test_cmd.extend(test_obj.extra_args)
+    commands.append(run_test_cmd)
+  return commands
+
+
+def GetBotStepMap():
+  compile_step = ['compile']
+  python_unittests = ['python_unittests']
+  std_host_tests = ['check_webview_licenses']
+  std_build_steps = ['compile', 'zip_build']
+  std_test_steps = ['extract_build']
+  std_tests = ['ui', 'unit']
+  trial_tests = [
+      'base_junit_tests',
+      'components_browsertests',
+      'gfx_unittests',
+      'gl_unittests',
+  ]
+  flakiness_server = (
+      '--flakiness-server=%s' % constants.UPSTREAM_FLAKINESS_SERVER)
+  experimental = ['--experimental']
+  run_mb = ['--run-mb']
+  bisect_chrome_output_dir = os.path.abspath(
+      os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
+                   os.pardir, 'bisect', 'src', 'out'))
+  B = BotConfig
+  H = (lambda steps, extra_args=None, extra_gyp=None, target_arch=None:
+       HostConfig('build/android/buildbot/bb_host_steps.py', steps, extra_args,
+                  extra_gyp, target_arch))
+  T = (lambda tests, extra_args=None:
+       TestConfig('build/android/buildbot/bb_device_steps.py', tests,
+                  extra_args))
+
+  bot_configs = [
+      # Main builders
+      B('main-builder-dbg', H(std_build_steps + std_host_tests)),
+      B('main-builder-rel', H(std_build_steps)),
+      B('main-clang-builder',
+        H(compile_step, extra_gyp='clang=1 component=shared_library')),
+      B('main-clobber', H(compile_step)),
+      B('main-tests-rel', H(std_test_steps),
+        T(std_tests, ['--cleanup', flakiness_server])),
+      B('main-tests', H(std_test_steps),
+        T(std_tests, ['--cleanup', flakiness_server])),
+
+      # Other waterfalls
+      B('asan-builder-tests', H(compile_step,
+                                extra_gyp='asan=1 component=shared_library'),
+        T(std_tests, ['--asan', '--asan-symbolize'])),
+      B('blink-try-builder', H(compile_step)),
+      B('chromedriver-fyi-tests-dbg', H(std_test_steps),
+        T(['chromedriver'],
+          ['--install=ChromePublic', '--install=ChromeDriverWebViewShell',
+           '--skip-wipe', '--disable-location', '--cleanup'])),
+      B('fyi-x86-builder-dbg',
+        H(compile_step + std_host_tests, experimental, target_arch='ia32')),
+      B('fyi-builder-dbg',
+        H(std_build_steps + std_host_tests, experimental,
+          extra_gyp='emma_coverage=1')),
+      B('x86-builder-dbg',
+        H(compile_step + std_host_tests, target_arch='ia32')),
+      B('fyi-builder-rel', H(std_build_steps, experimental)),
+      B('fyi-tests', H(std_test_steps),
+        T(std_tests + python_unittests,
+                      ['--experimental', flakiness_server,
+                      '--coverage-bucket', CHROMIUM_COVERAGE_BUCKET,
+                      '--cleanup'])),
+      B('user-build-fyi-tests-dbg', H(std_test_steps),
+        T(sorted(trial_tests))),
+      B('fyi-component-builder-tests-dbg',
+        H(compile_step, extra_gyp='component=shared_library'),
+        T(std_tests, ['--experimental', flakiness_server])),
+      B('gpu-builder-tests-dbg',
+        H(compile_step, extra_args=run_mb),
+        T(['gpu'], ['--install=ContentShell'])),
+      # Pass empty T([]) so that logcat monitor and device status check are run.
+      B('perf-bisect-builder-tests-dbg',
+        H(['bisect_perf_regression']),
+        T([], ['--chrome-output-dir', bisect_chrome_output_dir])),
+      B('perf-tests-rel', H(std_test_steps),
+        T([], ['--cleanup'])),
+      B('webkit-latest-webkit-tests', H(std_test_steps),
+        T(['webkit_layout', 'webkit'], ['--cleanup', '--auto-reconnect'])),
+      B('webkit-latest-contentshell', H(compile_step),
+        T(['webkit_layout'], ['--auto-reconnect'])),
+      B('builder-unit-tests', H(compile_step), T(['unit'])),
+
+      # Generic builder config (for substring match).
+      B('builder', H(std_build_steps)),
+  ]
+
+  bot_map = dict((config.bot_id, config) for config in bot_configs)
+
+  # These bots have identical configuration to ones defined earlier.
+  copy_map = [
+      ('lkgr-clobber', 'main-clobber'),
+      ('try-builder-dbg', 'main-builder-dbg'),
+      ('try-builder-rel', 'main-builder-rel'),
+      ('try-clang-builder', 'main-clang-builder'),
+      ('try-fyi-builder-dbg', 'fyi-builder-dbg'),
+      ('try-x86-builder-dbg', 'x86-builder-dbg'),
+      ('try-tests-rel', 'main-tests-rel'),
+      ('try-tests', 'main-tests'),
+      ('try-fyi-tests', 'fyi-tests'),
+      ('webkit-latest-tests', 'main-tests'),
+  ]
+  for to_id, from_id in copy_map:
+    assert to_id not in bot_map
+    # pylint: disable=W0212
+    bot_map[to_id] = copy.deepcopy(bot_map[from_id])._replace(bot_id=to_id)
+
+    # Trybots do not upload to flakiness dashboard. They should be otherwise
+    # identical in configuration to their trunk building counterparts.
+    test_obj = bot_map[to_id].test_obj
+    if to_id.startswith('try') and test_obj:
+      extra_args = test_obj.extra_args
+      if extra_args and flakiness_server in extra_args:
+        extra_args.remove(flakiness_server)
+  return bot_map
+
+
+# Return an object from the map, looking first for an exact id match.
+# If this fails, look for an id which is a substring of the specified id.
+# Choose the longest of all substring matches.
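+# For example, id 'main-tests-rel-7' contains both 'main-tests' and
+# 'main-tests-rel'; 'main-tests-rel' wins because it is the longest match.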
+# pylint: disable=W0622
+def GetBestMatch(id_map, id):
+  config = id_map.get(id)
+  if not config:
+    substring_matches = [x for x in id_map.iterkeys() if x in id]
+    if substring_matches:
+      max_id = max(substring_matches, key=len)
+      print 'Using config from id="%s" (substring match).' % max_id
+      config = id_map[max_id]
+  return config
+
+
+def GetRunBotOptParser():
+  parser = bb_utils.GetParser()
+  parser.add_option('--bot-id', help='Specify bot id directly.')
+  parser.add_option('--testing', action='store_true',
+                    help='For testing: print, but do not run commands')
+
+  return parser
+
+
+def GetBotConfig(options, bot_step_map):
+  bot_id = options.bot_id or options.factory_properties.get('android_bot_id')
+  if not bot_id:
+    print >> sys.stderr, (
+        'A bot id must be specified through option or factory_props.')
+    return
+
+  bot_config = GetBestMatch(bot_step_map, bot_id)
+  if not bot_config:
+    print 'Error: config for id="%s" cannot be inferred.' % bot_id
+  return bot_config
+
+
+def RunBotCommands(options, commands, env):
+  print 'Environment changes:'
+  print DictDiff(dict(os.environ), env)
+
+  for command in commands:
+    print bb_utils.CommandToString(command)
+    sys.stdout.flush()
+    if options.testing:
+      env['BUILDBOT_TESTING'] = '1'
+    return_code = subprocess.call(command, cwd=bb_utils.CHROME_SRC, env=env)
+    if return_code != 0:
+      return return_code
+
+
+def main(argv):
+  proc = subprocess.Popen(
+      ['/bin/hostname', '-f'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+  hostname_stdout, hostname_stderr = proc.communicate()
+  if proc.returncode == 0:
+    print 'Running on: ' + hostname_stdout
+  else:
+    print >> sys.stderr, 'WARNING: failed to run hostname'
+    print >> sys.stderr, hostname_stdout
+    print >> sys.stderr, hostname_stderr
+    sys.exit(1)
+
+  parser = GetRunBotOptParser()
+  options, args = parser.parse_args(argv[1:])
+  if args:
+    parser.error('Unused args: %s' % args)
+
+  bot_config = GetBotConfig(options, GetBotStepMap())
+  if not bot_config:
+    sys.exit(1)
+
+  print 'Using config:', bot_config
+
+  commands = GetCommands(options, bot_config)
+  for command in commands:
+    print 'Will run: ', bb_utils.CommandToString(command)
+  print
+
+  env = GetEnvironment(bot_config.host_obj, options.testing)
+  return RunBotCommands(options, commands, env)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/build/android/buildbot/bb_utils.py b/build/android/buildbot/bb_utils.py
new file mode 100644
index 0000000..71ac7b2
--- /dev/null
+++ b/build/android/buildbot/bb_utils.py
@@ -0,0 +1,104 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import optparse
+import os
+import subprocess
+import sys
+
+import bb_annotations
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+import devil_chromium # pylint: disable=unused-import
+from devil.utils import cmd_helper
+from pylib import constants
+
+
+TESTING = 'BUILDBOT_TESTING' in os.environ
+
+BB_BUILD_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
+    os.pardir, os.pardir, os.pardir, os.pardir))
+
+CHROME_SRC = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..', '..', '..'))
+
+# TODO: Figure out how to merge this with devil.utils.cmd_helper.OutDirectory().
+CHROME_OUT_DIR = os.path.join(CHROME_SRC, 'out')
+
+GOMA_DIR = os.environ.get('GOMA_DIR', os.path.join(BB_BUILD_DIR, 'goma'))
+
+GSUTIL_PATH = os.path.join(BB_BUILD_DIR, 'third_party', 'gsutil', 'gsutil')
+
+def CommandToString(command):
+  """Returns quoted command that can be run in bash shell."""
+  return ' '.join(cmd_helper.SingleQuote(c) for c in command)
+
+
+def SpawnCmd(command, stdout=None, cwd=CHROME_SRC):
+  """Spawn a process without waiting for termination."""
+  print '>', CommandToString(command)
+  sys.stdout.flush()
+  if TESTING:
+    class MockPopen(object):
+      @staticmethod
+      def wait():
+        return 0
+      @staticmethod
+      def communicate():
+        return '', ''
+    return MockPopen()
+  return subprocess.Popen(command, cwd=cwd, stdout=stdout)
+
+
+def RunCmd(command, flunk_on_failure=True, halt_on_failure=False,
+           warning_code=constants.WARNING_EXIT_CODE, stdout=None,
+           cwd=CHROME_SRC):
+  """Run a command relative to the chrome source root."""
+  code = SpawnCmd(command, stdout, cwd).wait()
+  print '<', CommandToString(command)
+  if code != 0:
+    print 'ERROR: process exited with code %d' % code
+    if code != warning_code and flunk_on_failure:
+      bb_annotations.PrintError()
+    else:
+      bb_annotations.PrintWarning()
+    # Allow steps to have both halting (i.e. 1) and non-halting exit codes.
+    if code != warning_code and halt_on_failure:
+      print 'FATAL %d != %d' % (code, warning_code)
+      sys.exit(1)
+  return code
+
+
+def GetParser():
+  """Returns an optparse parser that decodes JSON-valued property options."""
+  def ConvertJson(option, _, value, parser):
+    setattr(parser.values, option.dest, json.loads(value))
+  parser = optparse.OptionParser()
+  parser.add_option('--build-properties', action='callback',
+                    callback=ConvertJson, type='string', default={},
+                    help='build properties in JSON format')
+  parser.add_option('--factory-properties', action='callback',
+                    callback=ConvertJson, type='string', default={},
+                    help='factory properties in JSON format')
+  return parser
+
+
+def EncodeProperties(options):
+  return ['--factory-properties=%s' % json.dumps(options.factory_properties),
+          '--build-properties=%s' % json.dumps(options.build_properties)]
+
+
+def RunSteps(steps, step_cmds, options):
+  unknown_steps = set(steps) - set(step for step, _ in step_cmds)
+  if unknown_steps:
+    print >> sys.stderr, 'FATAL: Unknown steps %s' % list(unknown_steps)
+    sys.exit(1)
+
+  exit_code = 0
+  for step, cmd in step_cmds:
+    if step in steps:
+      exit_code = cmd(options) or exit_code
+
+  return exit_code
diff --git a/build/android/buildbot/env_to_json.py b/build/android/buildbot/env_to_json.py
new file mode 100755
index 0000000..f9a7a44
--- /dev/null
+++ b/build/android/buildbot/env_to_json.py
@@ -0,0 +1,11 @@
+#!/usr/bin/python
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Encode current environment into json.
+
+import json
+import os
+
+print json.dumps(dict(os.environ))
diff --git a/build/android/buildbot/tests/bb_run_bot_test.py b/build/android/buildbot/tests/bb_run_bot_test.py
new file mode 100755
index 0000000..810c60d
--- /dev/null
+++ b/build/android/buildbot/tests/bb_run_bot_test.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+
+BUILDBOT_DIR = os.path.join(os.path.dirname(__file__), '..')
+sys.path.append(BUILDBOT_DIR)
+import bb_run_bot
+
+def RunBotProcesses(bot_process_map):
+  code = 0
+  for bot, proc in bot_process_map:
+    _, err = proc.communicate()
+    code |= proc.returncode
+    if proc.returncode != 0:
+      print 'Error running the bot script with id="%s"' % bot, err
+
+  return code
+
+
+def main():
+  procs = [
+      (bot, subprocess.Popen(
+          [os.path.join(BUILDBOT_DIR, 'bb_run_bot.py'), '--bot-id', bot,
+          '--testing'], stdout=subprocess.PIPE, stderr=subprocess.PIPE))
+      for bot in bb_run_bot.GetBotStepMap()]
+  return RunBotProcesses(procs)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/copy_ex.gypi b/build/android/copy_ex.gypi
new file mode 100644
index 0000000..8c49d24
--- /dev/null
+++ b/build/android/copy_ex.gypi
@@ -0,0 +1,79 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Copy files to a directory with the option to clear directory first.
+#
+# Variables:
+#   dest_path - directory to copy files to.
+#   src_files - optional, a list of files to copy without changing name.
+#   clear - optional, if set, clear directory before copying files.
+#   renaming_sources - optional, a list of files to copy and rename.
+#   renaming_destinations - optional, a list of new file names corresponding to
+#                           renaming_sources.
+#
+# Example:
+#  {
+#    'target_name': 'copy_assets',
+#    'type': 'none',
+#    'variables': {
+#      'dest_path': 'apk/assets/path',
+#      'src_files': ['path1/fr.pak'],
+#      'clear': 1,
+#      # path2/old1 and path3/old2 will be copied to apk/assets/path and
+#      # renamed to new1 and new2, respectively.
+#      'renaming_sources': ['path2/old1', 'path3/old2'],
+#      'renaming_destinations': ['new1', 'new2'],
+#    },
+#    'includes': [ '../build/android/copy_ex.gypi' ],
+#  },
+#
+{
+  'variables': {
+    'clear%': 0,
+    'src_files%': [],
+    'renaming_sources%': [],
+    'renaming_destinations%': [],
+  },
+  'actions': [{
+    'action_name': '<(_target_name)_copy_ex',
+    'variables': {
+      'additional_args':[],
+      'local_inputs': [],
+      'dest_files': [],
+      'conditions': [
+        ['clear == 1', {
+          'additional_args': ['--clear'],
+        }],
+        ['src_files != []', {
+          'additional_args': ['--files', '<(src_files)'],
+          'local_inputs': ['<@(src_files)'],
+          # src_files will be used to generate destination files path for
+          # outputs.
+          'dest_files': ['<@(src_files)'],
+        }],
+        ['renaming_sources != []', {
+          'additional_args': [
+            '--renaming-sources', '<(renaming_sources)',
+            '--renaming-destinations', '<(renaming_destinations)'
+          ],
+          'local_inputs': ['<@(renaming_sources)'],
+          'dest_files': ['<@(renaming_destinations)'],
+        }],
+      ],
+    },
+    'inputs': [
+      '<(DEPTH)/build/android/gyp/copy_ex.py',
+      '<(DEPTH)/build/android/gyp/generate_copy_ex_outputs.py',
+      '<@(local_inputs)',
+    ],
+    'outputs': [
+      '<!@pymod_do_main(generate_copy_ex_outputs --dest-path <(dest_path) --src-files <(dest_files))',
+    ],
+    'action': [
+      'python', '<(DEPTH)/build/android/gyp/copy_ex.py',
+      '--dest', '<(dest_path)',
+      '<@(additional_args)',
+    ],
+  }],
+}
diff --git a/build/android/create_standalone_apk_action.gypi b/build/android/create_standalone_apk_action.gypi
new file mode 100644
index 0000000..d17af7c
--- /dev/null
+++ b/build/android/create_standalone_apk_action.gypi
@@ -0,0 +1,41 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide an action that
+# combines a directory of shared libraries and an incomplete APK into a
+# standalone APK.
+#
+# To use this, create a gyp action with the following form:
+#  {
+#    'action_name': 'some descriptive action name',
+#    'variables': {
+#      'inputs': [ 'input_path1', 'input_path2' ],
+#      'input_apk_path': '<(unsigned_apk_path)',
+#      'output_apk_path': '<(unsigned_standalone_apk_path)',
+#      'libraries_top_dir': '<(libraries_top_dir)',
+#    },
+#    'includes': [ 'relative/path/to/create_standalone_apk_action.gypi' ],
+#  },
+
+{
+  'message': 'Creating standalone APK: <(output_apk_path)',
+  'variables': {
+    'inputs': [],
+  },
+  'inputs': [
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(DEPTH)/build/android/gyp/create_standalone_apk.py',
+    '<(input_apk_path)',
+    '>@(inputs)',
+  ],
+  'outputs': [
+    '<(output_apk_path)',
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/create_standalone_apk.py',
+    '--libraries-top-dir=<(libraries_top_dir)',
+    '--input-apk-path=<(input_apk_path)',
+    '--output-apk-path=<(output_apk_path)',
+  ],
+}
diff --git a/build/android/developer_recommended_flags.gypi b/build/android/developer_recommended_flags.gypi
new file mode 100644
index 0000000..79c201d
--- /dev/null
+++ b/build/android/developer_recommended_flags.gypi
@@ -0,0 +1,61 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This is the set of recommended gyp variable settings for Chrome for Android development.
+#
+# These can be used by copying this file to $CHROME_SRC/chrome/supplement.gypi.
+#
+# Even better, create chrome/supplement.gypi containing the following:
+#   {
+#     'includes': [ '../build/android/developer_recommended_flags.gypi' ]
+#   }
+# and you'll get new settings automatically.
+# When using this method, you can override individual settings by setting them unconditionally (with
+# no %) in chrome/supplement.gypi.
+# E.g., to disable gyp_managed_install but use everything else:
+#   {
+#     'variables': {
+#       'gyp_managed_install': 0,
+#     },
+#     'includes': [ '../build/android/developer_recommended_flags.gypi' ]
+#   }
+
+{
+  'variables': {
+    'variables': {
+      # Set component to 'shared_library' to enable the component build. This builds native code as
+      # many small shared libraries instead of one monolithic library. This slightly reduces the time
+      # required for incremental builds.
+      'component%': 'shared_library',
+    },
+    'component%': '<(component)',
+
+    # When gyp_managed_install is set to 1, building an APK will install that APK on the connected
+    # device(/emulator). To install on multiple devices (or onto a new device), build the APK once
+    # with each device attached. This greatly reduces the time required for incremental builds.
+    #
+    # This comes with some caveats:
+    #   Only works with a single device connected (it will print a warning if
+    #     zero or multiple devices are attached).
+    #   Device must be flashed with a user-debug unsigned Android build.
+    #   Some actions are always run (i.e. ninja will never say "no work to do").
+    'gyp_managed_install%': 1,
+
+    # With gyp_managed_install, we do not necessarily need a standalone APK.
+    # When create_standalone_apk is set to 1, we will build a standalone APK
+    # anyway. For even faster builds, you can set create_standalone_apk to 0.
+    'create_standalone_apk%': 1,
+
+    # Set clang to 1 to use the clang compiler. Clang has much (much, much) better warning/error
+    # messages than gcc.
+    # TODO(cjhopman): Enable this when http://crbug.com/156420 is addressed. Until then, users can
+    # set clang to 1, but Android stack traces will sometimes be incomplete.
+    #'clang%': 1,
+
+    # Set fastbuild to 1 to build with less debugging information. This can greatly decrease linking
+    # time. The downside is that stack traces will be missing useful information (like line
+    # numbers).
+    #'fastbuild%': 1,
+  },
+}
diff --git a/build/android/devil_chromium.json b/build/android/devil_chromium.json
new file mode 100644
index 0000000..c1157fa
--- /dev/null
+++ b/build/android/devil_chromium.json
@@ -0,0 +1,69 @@
+{
+  "config_type": "BaseConfig",
+  "dependencies": {
+    "aapt": {
+      "file_info": {
+        "linux2_x86_64": {
+          "local_paths": [
+            "../../third_party/android_tools/sdk/build-tools/23.0.1/aapt"
+          ]
+        }
+      }
+    },
+    "adb": {
+      "file_info": {
+        "linux2_x86_64": {
+          "local_paths": [
+            "../../third_party/android_tools/sdk/platform-tools/adb"
+          ]
+        }
+      }
+    },
+    "android_sdk": {
+      "file_info": {
+        "linux2_x86_64": {
+          "local_paths": [
+            "../../third_party/android_tools/sdk"
+          ]
+        }
+      }
+    },
+    "dexdump": {
+      "file_info": {
+        "linux2_x86_64": {
+          "local_paths": [
+            "../../third_party/android_tools/sdk/build-tools/23.0.1/dexdump"
+          ]
+        }
+      }
+    },
+    "split-select": {
+      "file_info": {
+        "linux2_x86_64": {
+          "local_paths": [
+            "../../third_party/android_tools/sdk/build-tools/23.0.1/split-select"
+          ]
+        }
+      }
+    },
+    "pymock": {
+      "file_info": {
+        "darwin_x86_64": {
+          "local_paths": [
+            "../../third_party/pymock"
+          ]
+        },
+        "linux2_x86_64": {
+          "local_paths": [
+            "../../third_party/pymock"
+          ]
+        },
+        "win32_AMD64": {
+          "local_paths": [
+            "../../third_party/pymock"
+          ]
+        }
+      }
+    }
+  }
+}
diff --git a/build/android/devil_chromium.py b/build/android/devil_chromium.py
new file mode 100644
index 0000000..8472f65
--- /dev/null
+++ b/build/android/devil_chromium.py
@@ -0,0 +1,153 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Configures devil for use in chromium."""
+
+import os
+import sys
+
+from pylib.constants import host_paths
+
+if host_paths.DEVIL_PATH not in sys.path:
+  sys.path.append(host_paths.DEVIL_PATH)
+
+from devil import devil_env
+
+_DEVIL_CONFIG = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), 'devil_chromium.json'))
+
+_DEVIL_BUILD_PRODUCT_DEPS = {
+  'forwarder_device': [
+    {
+      'platform': 'android',
+      'arch': 'armeabi-v7a',
+      'name': 'forwarder_dist',
+    },
+    {
+      'platform': 'android',
+      'arch': 'arm64-v8a',
+      'name': 'forwarder_dist',
+    },
+    {
+      'platform': 'android',
+      'arch': 'mips',
+      'name': 'forwarder_dist',
+    },
+    {
+      'platform': 'android',
+      'arch': 'mips64',
+      'name': 'forwarder_dist',
+    },
+    {
+      'platform': 'android',
+      'arch': 'x86',
+      'name': 'forwarder_dist',
+    },
+    {
+      'platform': 'android',
+      'arch': 'x86_64',
+      'name': 'forwarder_dist',
+    },
+  ],
+  'forwarder_host': [
+    {
+      'platform': 'linux2',
+      'arch': 'x86_64',
+      'name': 'host_forwarder',
+    },
+  ],
+  'md5sum_device': [
+    {
+      'platform': 'android',
+      'arch': 'armeabi-v7a',
+      'name': 'md5sum_dist',
+    },
+    {
+      'platform': 'android',
+      'arch': 'arm64-v8a',
+      'name': 'md5sum_dist',
+    },
+    {
+      'platform': 'android',
+      'arch': 'mips',
+      'name': 'md5sum_dist',
+    },
+    {
+      'platform': 'android',
+      'arch': 'mips64',
+      'name': 'md5sum_dist',
+    },
+    {
+      'platform': 'android',
+      'arch': 'x86',
+      'name': 'md5sum_dist',
+    },
+    {
+      'platform': 'android',
+      'arch': 'x86_64',
+      'name': 'md5sum_dist',
+    },
+  ],
+  'md5sum_host': [
+    {
+      'platform': 'linux2',
+      'arch': 'x86_64',
+      'name': 'md5sum_bin_host',
+    },
+  ],
+}
+
+
+def Initialize(output_directory=None, custom_deps=None):
+  """Initializes devil with chromium's binaries and third-party libraries.
+
+  This includes:
+    - Libraries:
+      - the android SDK ("android_sdk")
+      - pymock ("pymock")
+    - Build products:
+      - host & device forwarder binaries
+          ("forwarder_device" and "forwarder_host")
+      - host & device md5sum binaries ("md5sum_device" and "md5sum_host")
+
+  Args:
+    output_directory: An optional path to the output directory. If not set,
+      no built dependencies are configured.
+    custom_deps: An optional dictionary specifying custom dependencies.
+      This should be of the form:
+
+        {
+          'dependency_name': {
+            'platform': 'path',
+            ...
+          },
+          ...
+        }
+  """
+
+  devil_dynamic_config = {
+    'config_type': 'BaseConfig',
+    'dependencies': {},
+  }
+  if output_directory:
+    output_directory = os.path.abspath(output_directory)
+    devil_dynamic_config['dependencies'] = {
+      dep_name: {
+        'file_info': {
+          '%s_%s' % (dep_config['platform'], dep_config['arch']): {
+            'local_paths': [
+              os.path.join(output_directory, dep_config['name']),
+            ],
+          }
+          for dep_config in dep_configs
+        }
+      }
+      for dep_name, dep_configs in _DEVIL_BUILD_PRODUCT_DEPS.iteritems()
+    }
+  if custom_deps:
+    devil_dynamic_config['dependencies'].update(custom_deps)
+
+  devil_env.config.Initialize(
+      configs=[devil_dynamic_config], config_files=[_DEVIL_CONFIG])
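+
+# A minimal usage sketch (the output directory name is illustrative):
+#
+#   import devil_chromium
+#   devil_chromium.Initialize(output_directory='out/Debug')
+#
+# After this, devil-based tools can resolve the dependencies declared above.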
+
diff --git a/build/android/dex_action.gypi b/build/android/dex_action.gypi
new file mode 100644
index 0000000..7d9638e
--- /dev/null
+++ b/build/android/dex_action.gypi
@@ -0,0 +1,63 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide a rule that dexes
+# compiled java files. If proguard_enabled == "true" and CONFIGURATION_NAME ==
+# "Release", then it will dex the proguard_enabled_input_path instead of the
+# normal dex_input_paths/dex_generated_input_dirs.
+#
+# To use this, create a gyp target with the following form:
+#  {
+#    'action_name': 'some name for the action'
+#    'actions': [
+#      'variables': {
+#        'dex_input_paths': [ 'files to dex (when proguard is not used) and add to input paths' ],
+#        'dex_generated_input_dirs': [ 'dirs that contain generated files to dex' ],
+#
+#        # For targets that use proguard:
+#        'proguard_enabled': 'true',
+#        'proguard_enabled_input_path': 'path to dex when using proguard',
+#      },
+#      'includes': [ 'relative/path/to/dex_action.gypi' ],
+#    ],
+#  },
+#
+
+{
+  'message': 'Creating dex file: <(output_path)',
+  'variables': {
+    'dex_input_paths': [],
+    'dex_generated_input_dirs': [],
+    'proguard_enabled%': 'false',
+    # TODO(jbudorick): remove this once multidex is done.
+    'debug_build_proguard_enabled%': 'false',
+    'proguard_enabled_input_path%': '',
+    'dex_no_locals%': 0,
+    'dex_additional_options': [],
+  },
+  'inputs': [
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(DEPTH)/build/android/gyp/util/md5_check.py',
+    '<(DEPTH)/build/android/gyp/dex.py',
+    '>@(dex_input_paths)',
+  ],
+  'outputs': [
+    '<(output_path)',
+    '<(output_path).inputs',
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/dex.py',
+    '--dex-path=<(output_path)',
+    '--android-sdk-tools=<(android_sdk_tools)',
+    '--output-directory=<(PRODUCT_DIR)',
+    '--configuration-name=<(CONFIGURATION_NAME)',
+    '--proguard-enabled=>(proguard_enabled)',
+    '--debug-build-proguard-enabled=>(debug_build_proguard_enabled)',
+    '--proguard-enabled-input-path=<(proguard_enabled_input_path)',
+    '--no-locals=>(dex_no_locals)',
+    '>@(dex_additional_options)',
+    '>@(dex_input_paths)',
+    '>@(dex_generated_input_dirs)',
+  ]
+}
diff --git a/build/android/disable_gcc_lto.gypi b/build/android/disable_gcc_lto.gypi
new file mode 100644
index 0000000..a733c7a
--- /dev/null
+++ b/build/android/disable_gcc_lto.gypi
@@ -0,0 +1,20 @@
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included to disable GCC LTO on a target.
+
+{
+  'target_conditions': [
+    ['_toolset=="target"', {
+      'conditions': [
+        ['OS=="android" and clang==0 and (use_lto==1 or use_lto_o2==1)', {
+          'cflags!': [
+            '-flto',
+            '-ffat-lto-objects',
+          ],
+        }],
+      ],
+    }],
+  ],
+}
diff --git a/build/android/docs/lint.md b/build/android/docs/lint.md
new file mode 100644
index 0000000..37f3550
--- /dev/null
+++ b/build/android/docs/lint.md
@@ -0,0 +1,91 @@
+# Lint
+
+Android's [**lint**](http://developer.android.com/tools/help/lint.html) is a static
+analysis tool that Chromium uses to catch possible issues in Java code.
+
+[TOC]
+
+## How Chromium uses lint
+
+Chromium runs lint on a per-target basis for all targets using any of the
+following templates if they are marked as Chromium code (i.e.,
+`chromium_code = true`):
+
+ - `android_apk`
+ - `android_library`
+ - `instrumentation_test_apk`
+ - `unittest_apk`
+
+Chromium also runs lint on a per-target basis for all targets using any of the
+following templates if they are marked as Chromium code and they support
+Android (i.e., `supports_android = true`): 
+
+ - `java_library`
+
+This is implemented in the
+[`android_lint`](https://code.google.com/p/chromium/codesearch#chromium/src/build/config/android/internal_rules.gni&q=android_lint%20file:internal_rules%5C.gni)
+gn template.
+
+## My code has a lint error
+
+If lint reports an issue in your code, there are several possible remedies.
+In descending order of preference:
+
+### Fix it
+
+While this isn't always the right response, fixing the lint error or warning
+should be the default.
+
+### Suppress it in code
+
+Android provides an annotation,
+[`@SuppressLint`](http://developer.android.com/reference/android/annotation/SuppressLint.html),
+that tells lint to ignore the annotated element. It can be used on classes,
+constructors, methods, parameters, fields, or local variables, though usage
+in Chromium is typically limited to the first three.
+
+Like many suppression annotations, `@SuppressLint` takes a value that tells **lint**
+what to ignore. It can be a single `String`:
+
+```java
+@SuppressLint("NewApi")
+public void foo() {
+    a.methodThatRequiresHighSdkLevel();
+}
+```
+
+It can also be a list of `String`s:
+
+```java
+@SuppressLint({
+        "NewApi",
+        "UseSparseArrays"
+        })
+public Map<Integer, FakeObject> bar() {
+    Map<Integer, FakeObject> shouldBeASparseArray = new HashMap<Integer, FakeObject>();
+    another.methodThatRequiresHighSdkLevel(shouldBeASparseArray);
+    return shouldBeASparseArray;
+}
+```
+
+This is the preferred way of suppressing warnings in a limited scope.
+
+### Suppress it in the suppressions XML file
+
+**lint** can be given an XML configuration containing warnings or errors that
+should be ignored. Chromium's lint suppression XML file can be found in
+[`build/android/lint/suppressions.xml`](https://chromium.googlesource.com/chromium/src/+/master/build/android/lint/suppressions.xml).
+It can be updated to suppress current warnings by running:
+
+```bash
+$ python build/android/lint/suppress.py <result.xml file>
+```
+
+e.g., to suppress lint errors found in `media_java`:
+
+```bash
+$ python build/android/lint/suppress.py out/Debug/gen/media/base/android/media_java__lint/result.xml
+```
+
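+For reference, `suppressions.xml` uses Android lint's standard configuration
+format. A minimal entry looks roughly like the following (the issue id and
+regexp here are illustrative, not taken from the real file):
+
+```xml
+<lint>
+  <issue id="NewApi">
+    <ignore regexp="third_party/some/legacy/path"/>
+  </issue>
+</lint>
+```
+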
+**This mechanism should only be used for disabling warnings across the entire code base; class-specific lint warnings should be disabled inline.**
+
diff --git a/build/android/download_doclava.py b/build/android/download_doclava.py
new file mode 100755
index 0000000..f9f9ea2
--- /dev/null
+++ b/build/android/download_doclava.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Minimal tool to download doclava from Google storage when building for
+Android."""
+
+import os
+import subprocess
+import sys
+
+
+# Its existence signifies an Android checkout.
+ANDROID_ONLY_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
+                                os.pardir, os.pardir,
+                                'third_party', 'android_tools')
+
+
+def main():
+  # Some Windows bots inadvertently have third_party/android_tools installed,
+  # but are unable to run download_from_google_storage because depot_tools
+  # is not in their path, so avoid failure and bail.
+  if sys.platform == 'win32':
+    return 0
+  if not os.path.exists(ANDROID_ONLY_DIR):
+    return 0
+  subprocess.check_call([
+      'download_from_google_storage',
+      '--no_resume',
+      '--no_auth',
+      '--bucket', 'chromium-doclava',
+      '--extract',
+      '-s',
+      os.path.join('src', 'buildtools', 'android', 'doclava.tar.gz.sha1')])
+  return 0
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/emma_coverage_stats.py b/build/android/emma_coverage_stats.py
new file mode 100755
index 0000000..20ec8ea
--- /dev/null
+++ b/build/android/emma_coverage_stats.py
@@ -0,0 +1,479 @@
+#!/usr/bin/python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Generates incremental code coverage reports for Java code in Chromium.
+
+Usage:
+
+  build/android/emma_coverage_stats.py -v --out <output file path> --emma-dir
+    <EMMA file directory> --lines-for-coverage-file
+    <path to file containing lines for coverage>
+
+  Creates a JSON representation of the overall and file coverage stats and saves
+  this information to the specified output file.
+"""
+
+import argparse
+import collections
+import json
+import logging
+import os
+import re
+import sys
+from xml.etree import ElementTree
+
+import devil_chromium
+from devil.utils import run_tests_helper
+
+NOT_EXECUTABLE = -1
+NOT_COVERED = 0
+COVERED = 1
+PARTIALLY_COVERED = 2
+
+# Coverage information about a single line of code.
+LineCoverage = collections.namedtuple(
+    'LineCoverage',
+    ['lineno', 'source', 'covered_status', 'fractional_line_coverage'])
+
+
+class _EmmaHtmlParser(object):
+  """Encapsulates HTML file parsing operations.
+
+  This class contains all operations related to parsing HTML files that were
+  produced using the EMMA code coverage tool.
+
+  Example HTML:
+
+  Package links:
+    <a href="_files/1.html">org.chromium.chrome</a>
+    This is returned by the selector |XPATH_SELECT_PACKAGE_ELEMENTS|.
+
+  Class links:
+    <a href="1e.html">DoActivity.java</a>
+    This is returned by the selector |XPATH_SELECT_CLASS_ELEMENTS|.
+
+  Line coverage data:
+    <tr class="p">
+       <td class="l" title="78% line coverage (7 out of 9)">108</td>
+       <td title="78% line coverage (7 out of 9 instructions)">
+         if (index < 0 || index >= mSelectors.size()) index = 0;</td>
+    </tr>
+    <tr>
+       <td class="l">109</td>
+       <td> </td>
+    </tr>
+    <tr class="c">
+       <td class="l">110</td>
+       <td>        if (mSelectors.get(index) != null) {</td>
+    </tr>
+    <tr class="z">
+       <td class="l">111</td>
+       <td>            for (int i = 0; i < mSelectors.size(); i++) {</td>
+    </tr>
+    Each <tr> element is returned by the selector |XPATH_SELECT_LOC|.
+
+    We can parse this to get:
+      1. Line number
+      2. Line of source code
+      3. Coverage status (c, z, or p)
+      4. Fractional coverage value (% out of 100 if PARTIALLY_COVERED)
+  """
+  # Selector to match all <a> elements within the rows that are in the table
+  # that displays all of the different packages.
+  _XPATH_SELECT_PACKAGE_ELEMENTS = './/BODY/TABLE[4]/TR/TD/A'
+
+  # Selector to match all <a> elements within the rows that are in the table
+  # that displays all of the different classes within a package.
+  _XPATH_SELECT_CLASS_ELEMENTS = './/BODY/TABLE[3]/TR/TD/A'
+
+  # Selector to match all <tr> elements within the table containing Java source
+  # code in an EMMA HTML file.
+  _XPATH_SELECT_LOC = './/BODY/TABLE[4]/TR'
+
+  # Children of HTML elements are represented as a list in ElementTree. These
+  # constants represent list indices corresponding to relevant child elements.
+
+  # Child 1 contains percentage covered for a line.
+  _ELEMENT_PERCENT_COVERED = 1
+
+  # Child 1 contains the original line of source code.
+  _ELEMENT_CONTAINING_SOURCE_CODE = 1
+
+  # Child 0 contains the line number.
+  _ELEMENT_CONTAINING_LINENO = 0
+
+  # Maps CSS class names to corresponding coverage constants.
+  _CSS_TO_STATUS = {'c': COVERED, 'p': PARTIALLY_COVERED, 'z': NOT_COVERED}
+
+  # UTF-8 no break space.
+  _NO_BREAK_SPACE = '\xc2\xa0'
+
+  def __init__(self, emma_file_base_dir):
+    """Initializes _EmmaHtmlParser.
+
+    Args:
+      emma_file_base_dir: Path to the location where EMMA report files are
+        stored. Should be where index.html is stored.
+    """
+    self._base_dir = emma_file_base_dir
+    self._emma_files_path = os.path.join(self._base_dir, '_files')
+    self._index_path = os.path.join(self._base_dir, 'index.html')
+
+  def GetLineCoverage(self, emma_file_path):
+    """Returns a list of LineCoverage objects for the given EMMA HTML file.
+
+    Args:
+      emma_file_path: String representing the path to the EMMA HTML file.
+
+    Returns:
+      A list of LineCoverage objects.
+    """
+    line_tr_elements = self._FindElements(
+        emma_file_path, self._XPATH_SELECT_LOC)
+    line_coverage = []
+    for tr in line_tr_elements:
+      # Get the coverage status.
+      coverage_status = self._CSS_TO_STATUS.get(tr.get('CLASS'), NOT_EXECUTABLE)
+      # Get the fractional coverage value.
+      if coverage_status == PARTIALLY_COVERED:
+        title_attribute = (tr[self._ELEMENT_PERCENT_COVERED].get('TITLE'))
+        # Parse string that contains percent covered: "83% line coverage ...".
+        percent_covered = title_attribute.split('%')[0]
+        fractional_coverage = int(percent_covered) / 100.0
+      else:
+        fractional_coverage = 1.0
+
+      # Get the line number.
+      lineno_element = tr[self._ELEMENT_CONTAINING_LINENO]
+      # Handles oddly formatted HTML (where there is an extra <a> tag).
+      lineno = int(lineno_element.text or
+                   lineno_element[self._ELEMENT_CONTAINING_LINENO].text)
+      # Get the original line of Java source code.
+      raw_source = tr[self._ELEMENT_CONTAINING_SOURCE_CODE].text
+      utf8_source = raw_source.encode('UTF-8')
+      source = utf8_source.replace(self._NO_BREAK_SPACE, ' ')
+
+      line = LineCoverage(lineno, source, coverage_status, fractional_coverage)
+      line_coverage.append(line)
+
+    return line_coverage
+
+  def GetPackageNameToEmmaFileDict(self):
+    """Returns a dict mapping Java packages to EMMA HTML coverage files.
+
+    Parses the EMMA index.html file to get a list of packages, then parses each
+    package HTML file to get a list of classes for that package, and creates
+    a dict with this info.
+
+    Returns:
+      A dict mapping string representation of Java packages (with class
+        names appended) to the corresponding file paths of EMMA HTML files.
+    """
+    # These <a> elements contain each package name and the path of the file
+    # where all classes within said package are listed.
+    package_link_elements = self._FindElements(
+        self._index_path, self._XPATH_SELECT_PACKAGE_ELEMENTS)
+    # Maps file path of package directory (EMMA generated) to package name.
+    # Example: emma_dir/f.html: org.chromium.chrome.
+    package_links = {
+      os.path.join(self._base_dir, link.attrib['HREF']): link.text
+      for link in package_link_elements if 'HREF' in link.attrib
+    }
+
+    package_to_emma = {}
+    for package_emma_file_path, package_name in package_links.iteritems():
+      # These <a> elements contain each class name in the current package and
+      # the path of the file where the coverage info is stored for each class.
+      coverage_file_link_elements = self._FindElements(
+          package_emma_file_path, self._XPATH_SELECT_CLASS_ELEMENTS)
+
+      for class_name_element in coverage_file_link_elements:
+        emma_coverage_file_path = os.path.join(
+            self._emma_files_path, class_name_element.attrib['HREF'])
+        full_package_name = '%s.%s' % (package_name, class_name_element.text)
+        package_to_emma[full_package_name] = emma_coverage_file_path
+
+    return package_to_emma
+
+  # pylint: disable=no-self-use
+  def _FindElements(self, file_path, xpath_selector):
+    """Reads a HTML file and performs an XPath match.
+
+    Args:
+      file_path: String representing the path to the HTML file.
+      xpath_selector: String representing xpath search pattern.
+
+    Returns:
+      A list of ElementTree.Elements matching the given XPath selector.
+        Returns an empty list if there is no match.
+    """
+    with open(file_path) as f:
+      file_contents = f.read().decode('ISO-8859-1').encode('UTF-8')
+      root = ElementTree.fromstring(file_contents)
+      return root.findall(xpath_selector)
+
+
+class _EmmaCoverageStats(object):
+  """Computes code coverage stats for Java code using the coverage tool EMMA.
+
+  This class provides an API that allows users to capture absolute code coverage
+  and code coverage on a subset of lines for each Java source file. Coverage
+  reports are generated in JSON format.
+  """
+  # Regular expression to get package name from Java package statement.
+  RE_PACKAGE_MATCH_GROUP = 'package'
+  RE_PACKAGE = re.compile(r'package (?P<%s>[\w.]*);' % RE_PACKAGE_MATCH_GROUP)
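+  # For example, 'package org.chromium.chrome;' matches with group
+  # 'package' == 'org.chromium.chrome'.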
+
+  def __init__(self, emma_file_base_dir, files_for_coverage):
+    """Initialize _EmmaCoverageStats.
+
+    Args:
+      emma_file_base_dir: String representing the path to the base directory
+        where EMMA HTML coverage files are stored, i.e. parent of index.html.
+      files_for_coverage: A list of Java source code file paths to get EMMA
+        coverage for.
+    """
+    self._emma_parser = _EmmaHtmlParser(emma_file_base_dir)
+    self._source_to_emma = self._GetSourceFileToEmmaFileDict(files_for_coverage)
+
+  def GetCoverageDict(self, lines_for_coverage):
+    """Returns a dict containing detailed coverage information.
+
+    Gets detailed coverage stats for each file specified in the
+    |lines_for_coverage| dict and the total incremental number of lines covered
+    and executable for all files in |lines_for_coverage|.
+
+    Args:
+      lines_for_coverage: A dict mapping Java source file paths to lists of line
+        numbers.
+
+    Returns:
+      A dict containing coverage stats for the given dict of files and lines.
+        Contains absolute coverage stats for each file, coverage stats for each
+        file's lines specified in |lines_for_coverage|, line by line coverage
+        for each file, and overall coverage stats for the lines specified in
+        |lines_for_coverage|.
+    """
+    file_coverage = {}
+    for file_path, line_numbers in lines_for_coverage.iteritems():
+      file_coverage_dict = self.GetCoverageDictForFile(file_path, line_numbers)
+      if file_coverage_dict:
+        file_coverage[file_path] = file_coverage_dict
+      else:
+        logging.warning(
+            'No code coverage data for %s, skipping.', file_path)
+
+    covered_statuses = [s['incremental'] for s in file_coverage.itervalues()]
+    num_covered_lines = sum(s['covered'] for s in covered_statuses)
+    num_total_lines = sum(s['total'] for s in covered_statuses)
+    return {
+      'files': file_coverage,
+      'patch': {
+        'incremental': {
+          'covered': num_covered_lines,
+          'total': num_total_lines
+        }
+      }
+    }
+
+  def GetCoverageDictForFile(self, file_path, line_numbers):
+    """Returns a dict containing detailed coverage info for the given file.
+
+    Args:
+      file_path: The path to the Java source file that we want to create the
+        coverage dict for.
+      line_numbers: A list of integer line numbers to retrieve additional stats
+        for.
+
+    Returns:
+      A dict containing absolute, incremental, and line by line coverage for
+        a file.
+    """
+    if file_path not in self._source_to_emma:
+      return None
+    emma_file = self._source_to_emma[file_path]
+    total_line_coverage = self._emma_parser.GetLineCoverage(emma_file)
+    incremental_line_coverage = [line for line in total_line_coverage
+                                 if line.lineno in line_numbers]
+    line_by_line_coverage = [
+      {
+        'line': line.source,
+        'coverage': line.covered_status,
+        'changed': line.lineno in line_numbers,
+        'fractional_coverage': line.fractional_line_coverage,
+      }
+      for line in total_line_coverage
+    ]
+    total_covered_lines, total_lines = (
+        self.GetSummaryStatsForLines(total_line_coverage))
+    incremental_covered_lines, incremental_total_lines = (
+        self.GetSummaryStatsForLines(incremental_line_coverage))
+
+    file_coverage_stats = {
+      'absolute': {
+        'covered': total_covered_lines,
+        'total': total_lines
+      },
+      'incremental': {
+        'covered': incremental_covered_lines,
+        'total': incremental_total_lines
+      },
+      'source': line_by_line_coverage,
+    }
+    return file_coverage_stats
+
+  # pylint: disable=no-self-use
+  def GetSummaryStatsForLines(self, line_coverage):
+    """Gets summary stats for a given list of LineCoverage objects.
+
+    Args:
+      line_coverage: A list of LineCoverage objects.
+
+    Returns:
+      A tuple containing the number of lines that are covered and the total
+        number of lines that are executable, respectively.
+    """
+    partially_covered_sum = 0
+    covered_status_totals = {COVERED: 0, NOT_COVERED: 0, PARTIALLY_COVERED: 0}
+    for line in line_coverage:
+      status = line.covered_status
+      if status == NOT_EXECUTABLE:
+        continue
+      covered_status_totals[status] += 1
+      if status == PARTIALLY_COVERED:
+        partially_covered_sum += line.fractional_line_coverage
+
+    total_covered = covered_status_totals[COVERED] + partially_covered_sum
+    total_lines = sum(covered_status_totals.values())
+    return total_covered, total_lines
+
+  def _GetSourceFileToEmmaFileDict(self, files):
+    """Gets a dict used to correlate Java source files with EMMA HTML files.
+
+    This method gathers the information needed to correlate EMMA HTML
+    files with Java source files. EMMA XML and plain text reports do not provide
+    line by line coverage data, so HTML reports must be used instead.
+    Unfortunately, the HTML files that are created are given garbage names
+    (i.e. 1.html), so we need to manually correlate EMMA HTML files
+    with the original Java source files.
+
+    Args:
+      files: A list of file names for which coverage information is desired.
+
+    Returns:
+      A dict mapping Java source file paths to EMMA HTML file paths.
+    """
+    # Maps Java source file paths to package names.
+    # Example: /usr/code/file.java -> org.chromium.file.java.
+    source_to_package = {}
+    for file_path in files:
+      package = self.GetPackageNameFromFile(file_path)
+      if package:
+        source_to_package[file_path] = package
+      else:
+        logging.warning("Skipping %s because it doesn\'t have a package "
+                        "statement.", file_path)
+
+    # Maps package names to EMMA report HTML files.
+    # Example: org.chromium.file.java -> out/coverage/1a.html.
+    package_to_emma = self._emma_parser.GetPackageNameToEmmaFileDict()
+    # Finally, we have a dict mapping Java file paths to EMMA report files.
+    # Example: /usr/code/file.java -> out/coverage/1a.html.
+    source_to_emma = {source: package_to_emma[package]
+                      for source, package in source_to_package.iteritems()
+                      if package in package_to_emma}
+    return source_to_emma
+
+  @staticmethod
+  def NeedsCoverage(file_path):
+    """Checks to see if the file needs to be analyzed for code coverage.
+
+    Args:
+      file_path: A string representing path to the file.
+
+    Returns:
+      True for Java files that exist, False for all others.
+    """
+    if os.path.splitext(file_path)[1] == '.java' and os.path.exists(file_path):
+      return True
+    else:
+      logging.info('Skipping file %s, cannot compute code coverage.', file_path)
+      return False
+
+  @staticmethod
+  def GetPackageNameFromFile(file_path):
+    """Gets the full package name including the file name for a given file path.
+
+    Args:
+      file_path: String representing the path to the Java source file.
+
+    Returns:
+      A string representing the full package name with file name appended or
+        None if there is no package statement in the file.
+    """
+    with open(file_path) as f:
+      file_content = f.read()
+      package_match = re.search(_EmmaCoverageStats.RE_PACKAGE, file_content)
+      if package_match:
+        package = package_match.group(_EmmaCoverageStats.RE_PACKAGE_MATCH_GROUP)
+        file_name = os.path.basename(file_path)
+        return '%s.%s' % (package, file_name)
+      else:
+        return None
+
+
+def GenerateCoverageReport(line_coverage_file, out_file_path, coverage_dir):
+  """Generates a coverage report for a given set of lines.
+
+  Writes the results of the coverage analysis to the file specified by
+  |out_file_path|.
+
+  Args:
+    line_coverage_file: The path to a file which contains a dict mapping file
+      names to lists of line numbers. Example: {file1: [1, 2, 3], ...} means
+      that we should compute coverage information on lines 1 - 3 for file1.
+    out_file_path: A string representing the location to write the JSON report.
+    coverage_dir: A string representing the file path where the EMMA
+      HTML coverage files are located (i.e. folder where index.html is located).
+  """
+  with open(line_coverage_file) as f:
+    potential_files_for_coverage = json.load(f)
+
+  files_for_coverage = {f: lines
+                        for f, lines in potential_files_for_coverage.iteritems()
+                        if _EmmaCoverageStats.NeedsCoverage(f)}
+
+  coverage_results = {}
+  if files_for_coverage:
+    code_coverage = _EmmaCoverageStats(coverage_dir, files_for_coverage.keys())
+    coverage_results = code_coverage.GetCoverageDict(files_for_coverage)
+  else:
+    logging.info('No Java files requiring coverage were included in %s.',
+                 line_coverage_file)
+
+  with open(out_file_path, 'w+') as out_status_file:
+    json.dump(coverage_results, out_status_file)
+
+
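+# Example invocation (hypothetical paths):
+#   build/android/emma_coverage_stats.py -v \
+#       --emma-dir out/coverage/coverage_html \
+#       --lines-for-coverage-file lines_for_coverage.json \
+#       --out coverage_report.json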
+def main():
+  argparser = argparse.ArgumentParser()
+  argparser.add_argument('--out', required=True, type=str,
+                         help='Report output file path.')
+  argparser.add_argument('--emma-dir', required=True, type=str,
+                         help='EMMA HTML report directory.')
+  argparser.add_argument('--lines-for-coverage-file', required=True, type=str,
+                         help='File containing a JSON object. Should contain a '
+                         'dict mapping file names to lists of line numbers of '
+                         'code for which coverage information is desired.')
+  argparser.add_argument('-v', '--verbose', action='count',
+                         help='Print verbose log information.')
+  args = argparser.parse_args()
+  run_tests_helper.SetLogLevel(args.verbose)
+  devil_chromium.Initialize()
+  GenerateCoverageReport(args.lines_for_coverage_file, args.out, args.emma_dir)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/emma_coverage_stats_test.py b/build/android/emma_coverage_stats_test.py
new file mode 100755
index 0000000..30b409e
--- /dev/null
+++ b/build/android/emma_coverage_stats_test.py
@@ -0,0 +1,563 @@
+#!/usr/bin/python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=protected-access
+
+import unittest
+from xml.etree import ElementTree
+
+import emma_coverage_stats
+from pylib.constants import host_paths
+
+with host_paths.SysPath(host_paths.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+EMPTY_COVERAGE_STATS_DICT = {
+  'files': {},
+  'patch': {
+    'incremental': {
+      'covered': 0, 'total': 0
+    }
+  }
+}
+
+
+class _EmmaHtmlParserTest(unittest.TestCase):
+  """Tests for _EmmaHtmlParser.
+
+  Uses modified EMMA report HTML that contains only the subset of tags needed
+  for test verification.
+  """
+
+  def setUp(self):
+    self.emma_dir = 'fake/dir/'
+    self.parser = emma_coverage_stats._EmmaHtmlParser(self.emma_dir)
+    self.simple_html = '<TR><TD CLASS="p">Test HTML</TD></TR>'
+    self.index_html = (
+      '<HTML>'
+        '<BODY>'
+          '<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
+          '</TABLE>'
+          '<TABLE CELLSPACING="0" WIDTH="100%">'
+          '</TABLE>'
+          '<TABLE CLASS="it" CELLSPACING="0">'
+          '</TABLE>'
+          '<TABLE CELLSPACING="0" WIDTH="100%">'
+            '<TR>'
+              '<TH CLASS="f">name</TH>'
+              '<TH>class, %</TH>'
+              '<TH>method, %</TH>'
+              '<TH>block, %</TH>'
+              '<TH>line, %</TH>'
+            '</TR>'
+            '<TR CLASS="o">'
+              '<TD><A HREF="_files/0.html"'
+              '>org.chromium.chrome.browser</A></TD>'
+              '<TD CLASS="h">0%   (0/3)</TD>'
+            '</TR>'
+            '<TR>'
+              '<TD><A HREF="_files/1.html"'
+              '>org.chromium.chrome.browser.tabmodel</A></TD>'
+              '<TD CLASS="h">0%   (0/8)</TD>'
+            '</TR>'
+          '</TABLE>'
+          '<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
+          '</TABLE>'
+        '</BODY>'
+      '</HTML>'
+    )
+    self.package_1_class_list_html = (
+      '<HTML>'
+        '<BODY>'
+          '<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
+          '</TABLE>'
+          '<TABLE CELLSPACING="0" WIDTH="100%">'
+          '</TABLE>'
+          '<TABLE CELLSPACING="0" WIDTH="100%">'
+            '<TR>'
+              '<TH CLASS="f">name</TH>'
+              '<TH>class, %</TH>'
+              '<TH>method, %</TH>'
+              '<TH>block, %</TH>'
+              '<TH>line, %</TH>'
+            '</TR>'
+            '<TR CLASS="o">'
+              '<TD><A HREF="1e.html">IntentHelper.java</A></TD>'
+              '<TD CLASS="h">0%   (0/3)</TD>'
+              '<TD CLASS="h">0%   (0/9)</TD>'
+              '<TD CLASS="h">0%   (0/97)</TD>'
+              '<TD CLASS="h">0%   (0/26)</TD>'
+            '</TR>'
+          '</TABLE>'
+          '<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
+          '</TABLE>'
+        '</BODY>'
+      '</HTML>'
+    )
+    self.package_2_class_list_html = (
+      '<HTML>'
+        '<BODY>'
+          '<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
+          '</TABLE>'
+          '<TABLE CELLSPACING="0" WIDTH="100%">'
+          '</TABLE>'
+          '<TABLE CELLSPACING="0" WIDTH="100%">'
+            '<TR>'
+              '<TH CLASS="f">name</TH>'
+              '<TH>class, %</TH>'
+              '<TH>method, %</TH>'
+              '<TH>block, %</TH>'
+              '<TH>line, %</TH>'
+            '</TR>'
+            '<TR CLASS="o">'
+              '<TD><A HREF="1f.html">ContentSetting.java</A></TD>'
+              '<TD CLASS="h">0%   (0/1)</TD>'
+            '</TR>'
+            '<TR>'
+              '<TD><A HREF="20.html">DevToolsServer.java</A></TD>'
+            '</TR>'
+            '<TR CLASS="o">'
+              '<TD><A HREF="21.html">FileProviderHelper.java</A></TD>'
+            '</TR>'
+            '<TR>'
+              '<TD><A HREF="22.html">ContextualMenuBar.java</A></TD>'
+            '</TR>'
+            '<TR CLASS="o">'
+              '<TD><A HREF="23.html">AccessibilityUtil.java</A></TD>'
+            '</TR>'
+            '<TR>'
+              '<TD><A HREF="24.html">NavigationPopup.java</A></TD>'
+            '</TR>'
+          '</TABLE>'
+          '<TABLE CLASS="hdft" CELLSPACING="0" WIDTH="100%">'
+          '</TABLE>'
+        '</BODY>'
+      '</HTML>'
+    )
+    self.partially_covered_tr_html = (
+      '<TR CLASS="p">'
+        '<TD CLASS="l" TITLE="78% line coverage (7 out of 9)">108</TD>'
+        '<TD TITLE="78% line coverage (7 out of 9 instructions)">'
+          'if (index &lt; 0 || index = mSelectors.size()) index = 0;</TD>'
+      '</TR>'
+    )
+    self.covered_tr_html = (
+      '<TR CLASS="c">'
+        '<TD CLASS="l">110</TD>'
+        '<TD>        if (mSelectors.get(index) != null) {</TD>'
+      '</TR>'
+    )
+    self.not_executable_tr_html = (
+      '<TR>'
+        '<TD CLASS="l">109</TD>'
+        '<TD> </TD>'
+      '</TR>'
+    )
+    self.tr_with_extra_a_tag = (
+      '<TR CLASS="z">'
+        '<TD CLASS="l">'
+          '<A name="1f">54</A>'
+        '</TD>'
+        '<TD>            }</TD>'
+      '</TR>'
+    )
+
+  def testInit(self):
+    emma_dir = self.emma_dir
+    parser = emma_coverage_stats._EmmaHtmlParser(emma_dir)
+    self.assertEqual(parser._base_dir, emma_dir)
+    self.assertEqual(parser._emma_files_path, 'fake/dir/_files')
+    self.assertEqual(parser._index_path, 'fake/dir/index.html')
+
+  def testFindElements_basic(self):
+    read_values = [self.simple_html]
+    found, _ = MockOpenForFunction(self.parser._FindElements, read_values,
+                                   file_path='fake', xpath_selector='.//TD')
+    self.assertIs(type(found), list)
+    self.assertIs(type(found[0]), ElementTree.Element)
+    self.assertEqual(found[0].text, 'Test HTML')
+
+  def testFindElements_multipleElements(self):
+    multiple_trs = self.not_executable_tr_html + self.covered_tr_html
+    read_values = ['<div>' + multiple_trs + '</div>']
+    found, _ = MockOpenForFunction(self.parser._FindElements, read_values,
+                                   file_path='fake', xpath_selector='.//TR')
+    self.assertEqual(2, len(found))
+
+  def testFindElements_noMatch(self):
+    read_values = [self.simple_html]
+    found, _ = MockOpenForFunction(self.parser._FindElements, read_values,
+                                   file_path='fake', xpath_selector='.//TR')
+    self.assertEqual(found, [])
+
+  def testFindElements_badFilePath(self):
+    with self.assertRaises(IOError):
+      with mock.patch('os.path.exists', return_value=False):
+        self.parser._FindElements('fake', xpath_selector='//tr')
+
+  def testGetPackageNameToEmmaFileDict_basic(self):
+    expected_dict = {
+      'org.chromium.chrome.browser.AccessibilityUtil.java':
+      'fake/dir/_files/23.html',
+      'org.chromium.chrome.browser.ContextualMenuBar.java':
+      'fake/dir/_files/22.html',
+      'org.chromium.chrome.browser.tabmodel.IntentHelper.java':
+      'fake/dir/_files/1e.html',
+      'org.chromium.chrome.browser.ContentSetting.java':
+      'fake/dir/_files/1f.html',
+      'org.chromium.chrome.browser.DevToolsServer.java':
+      'fake/dir/_files/20.html',
+      'org.chromium.chrome.browser.NavigationPopup.java':
+      'fake/dir/_files/24.html',
+      'org.chromium.chrome.browser.FileProviderHelper.java':
+      'fake/dir/_files/21.html'}
+
+    read_values = [self.index_html, self.package_1_class_list_html,
+                   self.package_2_class_list_html]
+    return_dict, mock_open = MockOpenForFunction(
+        self.parser.GetPackageNameToEmmaFileDict, read_values)
+
+    self.assertDictEqual(return_dict, expected_dict)
+    self.assertEqual(mock_open.call_count, 3)
+    calls = [mock.call('fake/dir/index.html'),
+             mock.call('fake/dir/_files/1.html'),
+             mock.call('fake/dir/_files/0.html')]
+    mock_open.assert_has_calls(calls)
+
+  def testGetPackageNameToEmmaFileDict_noPackageElements(self):
+    self.parser._FindElements = mock.Mock(return_value=[])
+    return_dict = self.parser.GetPackageNameToEmmaFileDict()
+    self.assertDictEqual({}, return_dict)
+
+  def testGetLineCoverage_status_basic(self):
+    line_coverage = self.GetLineCoverageWithFakeElements([self.covered_tr_html])
+    self.assertEqual(line_coverage[0].covered_status,
+                     emma_coverage_stats.COVERED)
+
+  def testGetLineCoverage_status_statusMissing(self):
+    line_coverage = self.GetLineCoverageWithFakeElements(
+        [self.not_executable_tr_html])
+    self.assertEqual(line_coverage[0].covered_status,
+                     emma_coverage_stats.NOT_EXECUTABLE)
+
+  def testGetLineCoverage_fractionalCoverage_basic(self):
+    line_coverage = self.GetLineCoverageWithFakeElements([self.covered_tr_html])
+    self.assertEqual(line_coverage[0].fractional_line_coverage, 1.0)
+
+  def testGetLineCoverage_fractionalCoverage_partial(self):
+    line_coverage = self.GetLineCoverageWithFakeElements(
+        [self.partially_covered_tr_html])
+    self.assertEqual(line_coverage[0].fractional_line_coverage, 0.78)
+
+  def testGetLineCoverage_lineno_basic(self):
+    line_coverage = self.GetLineCoverageWithFakeElements([self.covered_tr_html])
+    self.assertEqual(line_coverage[0].lineno, 110)
+
+  def testGetLineCoverage_lineno_withAlternativeHtml(self):
+    line_coverage = self.GetLineCoverageWithFakeElements(
+        [self.tr_with_extra_a_tag])
+    self.assertEqual(line_coverage[0].lineno, 54)
+
+  def testGetLineCoverage_source(self):
+    self.parser._FindElements = mock.Mock(
+        return_value=[ElementTree.fromstring(self.covered_tr_html)])
+    line_coverage = self.parser.GetLineCoverage('fake_path')
+    self.assertEqual(line_coverage[0].source,
+                     '        if (mSelectors.get(index) != null) {')
+
+  def testGetLineCoverage_multipleElements(self):
+    line_coverage = self.GetLineCoverageWithFakeElements(
+        [self.covered_tr_html, self.partially_covered_tr_html,
+         self.tr_with_extra_a_tag])
+    self.assertEqual(len(line_coverage), 3)
+
+  def GetLineCoverageWithFakeElements(self, html_elements):
+    """Wraps GetLineCoverage so mock HTML can easily be used.
+
+    Args:
+      html_elements: List of strings each representing an HTML element.
+
+    Returns:
+      A list of LineCoverage objects.
+    """
+    elements = [ElementTree.fromstring(string) for string in html_elements]
+    with mock.patch('emma_coverage_stats._EmmaHtmlParser._FindElements',
+                    return_value=elements):
+      return self.parser.GetLineCoverage('fake_path')
+
+
+class _EmmaCoverageStatsTest(unittest.TestCase):
+  """Tests for _EmmaCoverageStats."""
+
+  def setUp(self):
+    self.good_source_to_emma = {
+      '/path/to/1/File1.java': '/emma/1.html',
+      '/path/2/File2.java': '/emma/2.html',
+      '/path/2/File3.java': '/emma/3.html'
+    }
+    self.line_coverage = [
+        emma_coverage_stats.LineCoverage(
+            1, '', emma_coverage_stats.COVERED, 1.0),
+        emma_coverage_stats.LineCoverage(
+            2, '', emma_coverage_stats.COVERED, 1.0),
+        emma_coverage_stats.LineCoverage(
+            3, '', emma_coverage_stats.NOT_EXECUTABLE, 1.0),
+        emma_coverage_stats.LineCoverage(
+            4, '', emma_coverage_stats.NOT_COVERED, 1.0),
+        emma_coverage_stats.LineCoverage(
+            5, '', emma_coverage_stats.PARTIALLY_COVERED, 0.85),
+        emma_coverage_stats.LineCoverage(
+            6, '', emma_coverage_stats.PARTIALLY_COVERED, 0.20)
+    ]
+    self.lines_for_coverage = [1, 3, 5, 6]
+    with mock.patch('emma_coverage_stats._EmmaHtmlParser._FindElements',
+                    return_value=[]):
+      self.simple_coverage = emma_coverage_stats._EmmaCoverageStats(
+          'fake_dir', {})
+
+  def testInit(self):
+    coverage_stats = self.simple_coverage
+    self.assertIsInstance(coverage_stats._emma_parser,
+                          emma_coverage_stats._EmmaHtmlParser)
+    self.assertIsInstance(coverage_stats._source_to_emma, dict)
+
+  def testNeedsCoverage_withExistingJavaFile(self):
+    test_file = '/path/to/file/File.java'
+    with mock.patch('os.path.exists', return_value=True):
+      self.assertTrue(
+          emma_coverage_stats._EmmaCoverageStats.NeedsCoverage(test_file))
+
+  def testNeedsCoverage_withNonJavaFile(self):
+    test_file = '/path/to/file/File.c'
+    with mock.patch('os.path.exists', return_value=True):
+      self.assertFalse(
+          emma_coverage_stats._EmmaCoverageStats.NeedsCoverage(test_file))
+
+  def testNeedsCoverage_fileDoesNotExist(self):
+    test_file = '/path/to/file/File.java'
+    with mock.patch('os.path.exists', return_value=False):
+      self.assertFalse(
+          emma_coverage_stats._EmmaCoverageStats.NeedsCoverage(test_file))
+
+  def testGetPackageNameFromFile_basic(self):
+    test_file_text = """// Test Copyright
+    package org.chromium.chrome.browser;
+    import android.graphics.RectF;"""
+    result_package, _ = MockOpenForFunction(
+        emma_coverage_stats._EmmaCoverageStats.GetPackageNameFromFile,
+        [test_file_text], file_path='/path/to/file/File.java')
+    self.assertEqual(result_package, 'org.chromium.chrome.browser.File.java')
+
+  def testGetPackageNameFromFile_noPackageStatement(self):
+    result_package, _ = MockOpenForFunction(
+        emma_coverage_stats._EmmaCoverageStats.GetPackageNameFromFile,
+        ['not a package statement'], file_path='/path/to/file/File.java')
+    self.assertIsNone(result_package)
+
+  def testGetSummaryStatsForLines_basic(self):
+    covered, total = self.simple_coverage.GetSummaryStatsForLines(
+        self.line_coverage)
+    self.assertEqual(covered, 3.05)
+    self.assertEqual(total, 5)
+
+  def testGetSourceFileToEmmaFileDict(self):
+    package_names = {
+      '/path/to/1/File1.java': 'org.fake.one.File1.java',
+      '/path/2/File2.java': 'org.fake.File2.java',
+      '/path/2/File3.java': 'org.fake.File3.java'
+    }
+    package_to_emma = {
+      'org.fake.one.File1.java': '/emma/1.html',
+      'org.fake.File2.java': '/emma/2.html',
+      'org.fake.File3.java': '/emma/3.html'
+    }
+    with mock.patch('os.path.exists', return_value=True):
+      coverage_stats = self.simple_coverage
+      coverage_stats._emma_parser.GetPackageNameToEmmaFileDict = mock.MagicMock(
+          return_value=package_to_emma)
+      coverage_stats.GetPackageNameFromFile = lambda x: package_names[x]
+      result_dict = coverage_stats._GetSourceFileToEmmaFileDict(
+          package_names.keys())
+    self.assertDictEqual(result_dict, self.good_source_to_emma)
+
+  def testGetCoverageDictForFile(self):
+    line_coverage = self.line_coverage
+    self.simple_coverage._emma_parser.GetLineCoverage = lambda x: line_coverage
+    self.simple_coverage._source_to_emma = {'/fake/src': 'fake/emma'}
+    lines = self.lines_for_coverage
+    expected_dict = {
+      'absolute': {
+        'covered': 3.05,
+        'total': 5
+      },
+      'incremental': {
+        'covered': 2.05,
+        'total': 3
+      },
+      'source': [
+        {
+          'line': line_coverage[0].source,
+          'coverage': line_coverage[0].covered_status,
+          'changed': True,
+          'fractional_coverage': line_coverage[0].fractional_line_coverage,
+        },
+        {
+          'line': line_coverage[1].source,
+          'coverage': line_coverage[1].covered_status,
+          'changed': False,
+          'fractional_coverage': line_coverage[1].fractional_line_coverage,
+        },
+        {
+          'line': line_coverage[2].source,
+          'coverage': line_coverage[2].covered_status,
+          'changed': True,
+          'fractional_coverage': line_coverage[2].fractional_line_coverage,
+        },
+        {
+          'line': line_coverage[3].source,
+          'coverage': line_coverage[3].covered_status,
+          'changed': False,
+          'fractional_coverage': line_coverage[3].fractional_line_coverage,
+        },
+        {
+          'line': line_coverage[4].source,
+          'coverage': line_coverage[4].covered_status,
+          'changed': True,
+          'fractional_coverage': line_coverage[4].fractional_line_coverage,
+        },
+        {
+          'line': line_coverage[5].source,
+          'coverage': line_coverage[5].covered_status,
+          'changed': True,
+          'fractional_coverage': line_coverage[5].fractional_line_coverage,
+        }
+      ]
+    }
+    result_dict = self.simple_coverage.GetCoverageDictForFile(
+        '/fake/src', lines)
+    self.assertDictEqual(result_dict, expected_dict)
+
+  def testGetCoverageDictForFile_emptyCoverage(self):
+    expected_dict = {
+      'absolute': {'covered': 0, 'total': 0},
+      'incremental': {'covered': 0, 'total': 0},
+      'source': []
+    }
+    self.simple_coverage._emma_parser.GetLineCoverage = lambda x: []
+    self.simple_coverage._source_to_emma = {'fake_dir': 'fake/emma'}
+    result_dict = self.simple_coverage.GetCoverageDictForFile('fake_dir', {})
+    self.assertDictEqual(result_dict, expected_dict)
+
+  def testGetCoverageDictForFile_missingCoverage(self):
+    self.simple_coverage._source_to_emma = {}
+    result_dict = self.simple_coverage.GetCoverageDictForFile('fake_file', {})
+    self.assertIsNone(result_dict)
+
+  def testGetCoverageDict_basic(self):
+    files_for_coverage = {
+      '/path/to/1/File1.java': [1, 3, 4],
+      '/path/2/File2.java': [1, 2]
+    }
+    self.simple_coverage._source_to_emma = {
+      '/path/to/1/File1.java': 'emma_1',
+      '/path/2/File2.java': 'emma_2'
+    }
+    coverage_info = {
+      'emma_1': [
+        emma_coverage_stats.LineCoverage(
+            1, '', emma_coverage_stats.COVERED, 1.0),
+        emma_coverage_stats.LineCoverage(
+            2, '', emma_coverage_stats.PARTIALLY_COVERED, 0.5),
+        emma_coverage_stats.LineCoverage(
+            3, '', emma_coverage_stats.NOT_EXECUTABLE, 1.0),
+        emma_coverage_stats.LineCoverage(
+            4, '', emma_coverage_stats.COVERED, 1.0)
+      ],
+      'emma_2': [
+        emma_coverage_stats.LineCoverage(
+            1, '', emma_coverage_stats.NOT_COVERED, 1.0),
+        emma_coverage_stats.LineCoverage(
+            2, '', emma_coverage_stats.COVERED, 1.0)
+      ]
+    }
+    expected_dict = {
+      'files': {
+        '/path/2/File2.java': {
+          'absolute': {'covered': 1, 'total': 2},
+          'incremental': {'covered': 1, 'total': 2},
+          'source': [{'changed': True, 'coverage': 0,
+                      'line': '', 'fractional_coverage': 1.0},
+                     {'changed': True, 'coverage': 1,
+                      'line': '', 'fractional_coverage': 1.0}]
+        },
+        '/path/to/1/File1.java': {
+          'absolute': {'covered': 2.5, 'total': 3},
+          'incremental': {'covered': 2, 'total': 2},
+          'source': [{'changed': True, 'coverage': 1,
+                      'line': '', 'fractional_coverage': 1.0},
+                     {'changed': False, 'coverage': 2,
+                      'line': '', 'fractional_coverage': 0.5},
+                     {'changed': True, 'coverage': -1,
+                      'line': '', 'fractional_coverage': 1.0},
+                     {'changed': True, 'coverage': 1,
+                      'line': '', 'fractional_coverage': 1.0}]
+        }
+      },
+      'patch': {'incremental': {'covered': 3, 'total': 4}}
+    }
+    # Return the relevant coverage info for each file.
+    self.simple_coverage._emma_parser.GetLineCoverage = (
+        lambda x: coverage_info[x])
+    result_dict = self.simple_coverage.GetCoverageDict(files_for_coverage)
+    self.assertDictEqual(result_dict, expected_dict)
+
+  def testGetCoverageDict_noCoverage(self):
+    result_dict = self.simple_coverage.GetCoverageDict({})
+    self.assertDictEqual(result_dict, EMPTY_COVERAGE_STATS_DICT)
+
+
+class EmmaCoverageStatsGenerateCoverageReport(unittest.TestCase):
+  """Tests for GenerateCoverageReport."""
+
+  def testGenerateCoverageReport_missingJsonFile(self):
+    with self.assertRaises(IOError):
+      with mock.patch('os.path.exists', return_value=False):
+        emma_coverage_stats.GenerateCoverageReport('', '', '')
+
+  def testGenerateCoverageReport_invalidJsonFile(self):
+    with self.assertRaises(ValueError):
+      with mock.patch('os.path.exists', return_value=True):
+        MockOpenForFunction(emma_coverage_stats.GenerateCoverageReport, [''],
+                            line_coverage_file='', out_file_path='',
+                            coverage_dir='')
+
+
+def MockOpenForFunction(func, side_effects, **kwargs):
+  """Allows easy mock open and read for callables that open multiple files.
+
+  Will mock the python open function in a way such that each time read() is
+  called on an open file, the next element in |side_effects| is returned. This
+  makes it easier to test functions that call open() multiple times.
+
+  Args:
+    func: The callable to invoke once mock files are setup.
+    side_effects: A list of return values for each file to return once read.
+      Length of list should be equal to the number of calls to open in
+      |func|.
+    **kwargs: Keyword arguments to be passed to |func|.
+
+  Returns:
+    A tuple containing the return value of |func| and the MagicMock object
+      used to mock all calls to open.
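+
+  Example (illustrative, mirroring the tests above):
+    return_dict, mock_open = MockOpenForFunction(
+        parser.GetPackageNameToEmmaFileDict, [index_html, package_html])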
+  """
+  mock_open = mock.mock_open()
+  mock_open.side_effect = [mock.mock_open(read_data=side_effect).return_value
+                           for side_effect in side_effects]
+  with mock.patch('__builtin__.open', mock_open):
+    return func(**kwargs), mock_open
+
+
+if __name__ == '__main__':
+  # Suppress logging messages.
+  unittest.main(buffer=True)
diff --git a/build/android/emma_instr_action.gypi b/build/android/emma_instr_action.gypi
new file mode 100644
index 0000000..0505eab
--- /dev/null
+++ b/build/android/emma_instr_action.gypi
@@ -0,0 +1,46 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide a rule that
+# instruments either java class files, or jars.
+
+{
+  'variables': {
+    'input_path%': '',
+    'output_path%': '',
+    'stamp_path%': '',
+    'extra_instr_args': [
+      '--coverage-file=<(coverage_file)',
+      '--sources-list-file=<(sources_list_file)',
+    ],
+    'emma_jar': '<(android_sdk_root)/tools/lib/emma.jar',
+    'conditions': [
+      ['emma_instrument != 0', {
+        'extra_instr_args': [
+          '--source-dirs=<(java_in_dir)/src >(additional_src_dirs) >(generated_src_dirs)',
+          '--src-root=<(DEPTH)',
+          '--emma-jar=<(emma_jar)',
+          '--filter-string=<(emma_filter)',
+        ],
+        'instr_action': 'instrument_jar',
+      }, {
+        'instr_action': 'copy',
+        'extra_instr_args': [],
+      }]
+    ]
+  },
+  'inputs': [
+    '<(DEPTH)/build/android/gyp/emma_instr.py',
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(DEPTH)/build/android/pylib/utils/command_option_parser.py',
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/emma_instr.py',
+    '<(instr_action)',
+    '--input-path=<(input_path)',
+    '--output-path=<(output_path)',
+    '--stamp=<(stamp_path)',
+    '<@(extra_instr_args)',
+  ]
+}
diff --git a/build/android/empty/src/.keep b/build/android/empty/src/.keep
new file mode 100644
index 0000000..0f710b6
--- /dev/null
+++ b/build/android/empty/src/.keep
@@ -0,0 +1,6 @@
+This is a file that needs to live here until http://crbug.com/158155 has
+been fixed.
+
+The ant build system requires that a src folder is always present, but for
+some of our targets that is not the case. Giving it an empty src folder
+works nicely though.
diff --git a/build/android/empty_proguard.flags b/build/android/empty_proguard.flags
new file mode 100644
index 0000000..53484fe
--- /dev/null
+++ b/build/android/empty_proguard.flags
@@ -0,0 +1 @@
+# Used for apk targets that do not need proguard. See build/java_apk.gypi.
diff --git a/build/android/enable_asserts.py b/build/android/enable_asserts.py
new file mode 100755
index 0000000..b303eda
--- /dev/null
+++ b/build/android/enable_asserts.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Enables dalvik vm asserts in the android device."""
+
+import argparse
+import sys
+
+import devil_chromium
+from devil.android import device_blacklist
+from devil.android import device_utils
+
+
+def main():
+  parser = argparse.ArgumentParser()
+
+  parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
+
+  set_asserts_group = parser.add_mutually_exclusive_group(required=True)
+  set_asserts_group.add_argument(
+      '--enable_asserts', dest='set_asserts', action='store_true',
+      help='Sets the dalvik.vm.enableassertions property to "all"')
+  set_asserts_group.add_argument(
+      '--disable_asserts', dest='set_asserts', action='store_false',
+      help='Removes the dalvik.vm.enableassertions property')
+
+  args = parser.parse_args()
+
+  devil_chromium.Initialize()
+
+  blacklist = (device_blacklist.Blacklist(args.blacklist_file)
+               if args.blacklist_file
+               else None)
+
+  # TODO(jbudorick): Accept optional serial number and run only for the
+  # specified device when present.
+  devices = device_utils.DeviceUtils.parallel(
+      device_utils.DeviceUtils.HealthyDevices(blacklist))
+
+  def set_java_asserts_and_restart(device):
+    if device.SetJavaAsserts(args.set_asserts):
+      device.RunShellCommand('stop')
+      device.RunShellCommand('start')
+
+  devices.pMap(set_java_asserts_and_restart)
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/envsetup.sh b/build/android/envsetup.sh
new file mode 100755
index 0000000..0545330
--- /dev/null
+++ b/build/android/envsetup.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Sets up environment for building Chromium on Android.
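+#
+# Typical usage (the script must be sourced, not executed):
+#   . build/android/envsetup.sh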
+
+# Make sure we're being sourced (possibly by another script). Check for bash
+# since zsh sets $0 when sourcing.
+if [[ -n "$BASH_VERSION" && "${BASH_SOURCE:-$0}" == "$0" ]]; then
+  echo "ERROR: envsetup must be sourced."
+  exit 1
+fi
+
+# This only exists to set local variables. Don't call this manually.
+android_envsetup_main() {
+  local SCRIPT_PATH="$1"
+  local SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
+
+  local CURRENT_DIR="$(readlink -f "${SCRIPT_DIR}/../../")"
+  if [[ -z "${CHROME_SRC}" ]]; then
+    # If $CHROME_SRC was not set, assume current directory is CHROME_SRC.
+    local CHROME_SRC="${CURRENT_DIR}"
+  fi
+
+  if [[ "${CURRENT_DIR/"${CHROME_SRC}"/}" == "${CURRENT_DIR}" ]]; then
+    # If the current directory is not in $CHROME_SRC, $CHROME_SRC might be
+    # set for another source tree. If $CHROME_SRC was set correctly and we
+    # are in the correct directory, "${CURRENT_DIR/"${CHROME_SRC}"/}" will
+    # be "". Otherwise, it will be equal to "${CURRENT_DIR}".
+    echo "Warning: Current directory is outside CHROME_SRC; it may not be \
+  the one you want."
+    echo "${CHROME_SRC}"
+  fi
+
+  # Allow the caller to override a few environment variables. If any of them is
+  # unset, we default to a sane value that's known to work. This allows for
+  # experimentation with a custom SDK.
+  if [[ -z "${ANDROID_SDK_ROOT}" || ! -d "${ANDROID_SDK_ROOT}" ]]; then
+    local ANDROID_SDK_ROOT="${CHROME_SRC}/third_party/android_tools/sdk/"
+  fi
+
+  # Add Android SDK tools to system path.
+  export PATH=$PATH:${ANDROID_SDK_ROOT}/platform-tools
+
+  # Add Android utility tools to the system path.
+  export PATH=$PATH:${ANDROID_SDK_ROOT}/tools/
+
+  # Add Chromium Android development scripts to system path.
+  # Must be after CHROME_SRC is set.
+  export PATH=$PATH:${CHROME_SRC}/build/android
+
+  export ENVSETUP_GYP_CHROME_SRC=${CHROME_SRC}  # TODO(thakis): Remove.
+}
+# In zsh, $0 is the name of the file being sourced.
+android_envsetup_main "${BASH_SOURCE:-$0}"
+unset -f android_envsetup_main
+
+android_gyp() {
+  echo "Please call build/gyp_chromium instead. android_gyp is going away."
+  "${ENVSETUP_GYP_CHROME_SRC}/build/gyp_chromium" --depth="${ENVSETUP_GYP_CHROME_SRC}" --check "$@"
+}
diff --git a/build/android/finalize_apk_action.gypi b/build/android/finalize_apk_action.gypi
new file mode 100644
index 0000000..644f9e8
--- /dev/null
+++ b/build/android/finalize_apk_action.gypi
@@ -0,0 +1,49 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide an action that
+# signs and zipaligns an APK.
+#
+# To use this, create a gyp action with the following form:
+#  {
+#    'action_name': 'some descriptive action name',
+#    'variables': {
+#      'input_apk_path': 'relative/path/to/input.apk',
+#      'output_apk_path': 'relative/path/to/output.apk',
+#    },
+#    'includes': [ '../../build/android/finalize_apk_action.gypi' ],
+#  },
+#
+
+{
+  'message': 'Signing/aligning <(_target_name) APK: <(input_apk_path)',
+  'variables': {
+    'keystore_path%': '<(DEPTH)/build/android/ant/chromium-debug.keystore',
+    'keystore_name%': 'chromiumdebugkey',
+    'keystore_password%': 'chromium',
+    'zipalign_path%': '<(android_sdk_tools)/zipalign',
+    'rezip_apk_jar_path%': '<(PRODUCT_DIR)/lib.java/rezip_apk.jar',
+    'load_library_from_zip%': 0,
+  },
+  'inputs': [
+    '<(DEPTH)/build/android/gyp/finalize_apk.py',
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(keystore_path)',
+    '<(input_apk_path)',
+  ],
+  'outputs': [
+    '<(output_apk_path)',
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/finalize_apk.py',
+    '--zipalign-path=<(zipalign_path)',
+    '--unsigned-apk-path=<(input_apk_path)',
+    '--final-apk-path=<(output_apk_path)',
+    '--key-path=<(keystore_path)',
+    '--key-name=<(keystore_name)',
+    '--key-passwd=<(keystore_password)',
+    '--load-library-from-zip=<(load_library_from_zip)',
+    '--rezip-apk-jar-path=<(rezip_apk_jar_path)',
+  ],
+}
diff --git a/build/android/finalize_splits_action.gypi b/build/android/finalize_splits_action.gypi
new file mode 100644
index 0000000..daa7f83
--- /dev/null
+++ b/build/android/finalize_splits_action.gypi
@@ -0,0 +1,76 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide an action that
+# signs and zipaligns split APKs.
+#
+# Required variables:
+#  apk_name - Base name of the apk.
+# Optional variables:
+#  density_splits - Whether to process density splits
+#  language_splits - Whether to process language splits
+
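+# A hypothetical inclusion might look like (sketch only; values are
+# illustrative):
+#  {
+#    'action_name': 'finalize_splits',
+#    'variables': {
+#      'apk_name': 'SomeApk',
+#      'density_splits': 1,
+#    },
+#    'includes': [ 'build/android/finalize_splits_action.gypi' ],
+#  },
+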
+{
+  'variables': {
+    'keystore_path%': '<(DEPTH)/build/android/ant/chromium-debug.keystore',
+    'keystore_name%': 'chromiumdebugkey',
+    'keystore_password%': 'chromium',
+    'zipalign_path%': '<(android_sdk_tools)/zipalign',
+    'density_splits%': 0,
+    'language_splits%': [],
+    'resource_packaged_apk_name': '<(apk_name)-resources.ap_',
+    'resource_packaged_apk_path': '<(intermediate_dir)/<(resource_packaged_apk_name)',
+    'base_output_path': '<(PRODUCT_DIR)/apks/<(apk_name)',
+  },
+  'inputs': [
+    '<(DEPTH)/build/android/gyp/finalize_splits.py',
+    '<(DEPTH)/build/android/gyp/finalize_apk.py',
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(keystore_path)',
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/finalize_splits.py',
+    '--resource-packaged-apk-path=<(resource_packaged_apk_path)',
+    '--base-output-path=<(base_output_path)',
+    '--zipalign-path=<(zipalign_path)',
+    '--key-path=<(keystore_path)',
+    '--key-name=<(keystore_name)',
+    '--key-passwd=<(keystore_password)',
+  ],
+  'conditions': [
+    ['density_splits == 1', {
+      'message': 'Signing/aligning <(_target_name) density splits',
+      'inputs': [
+        '<(resource_packaged_apk_path)_hdpi',
+        '<(resource_packaged_apk_path)_xhdpi',
+        '<(resource_packaged_apk_path)_xxhdpi',
+        '<(resource_packaged_apk_path)_xxxhdpi',
+        '<(resource_packaged_apk_path)_tvdpi',
+      ],
+      'outputs': [
+        '<(base_output_path)-density-hdpi.apk',
+        '<(base_output_path)-density-xhdpi.apk',
+        '<(base_output_path)-density-xxhdpi.apk',
+        '<(base_output_path)-density-xxxhdpi.apk',
+        '<(base_output_path)-density-tvdpi.apk',
+      ],
+      'action': [
+        '--densities=hdpi,xhdpi,xxhdpi,xxxhdpi,tvdpi',
+      ],
+    }],
+    ['language_splits != []', {
+      'message': 'Signing/aligning <(_target_name) language splits',
+      'inputs': [
+        "<!@(python <(DEPTH)/build/apply_locales.py '<(resource_packaged_apk_path)_ZZLOCALE' <(language_splits))",
+      ],
+      'outputs': [
+        "<!@(python <(DEPTH)/build/apply_locales.py '<(base_output_path)-lang-ZZLOCALE.apk' <(language_splits))",
+      ],
+      'action': [
+        '--languages=<(language_splits)',
+      ],
+    }],
+  ],
+}
+
diff --git a/build/android/findbugs_diff.py b/build/android/findbugs_diff.py
new file mode 100755
index 0000000..8034b71
--- /dev/null
+++ b/build/android/findbugs_diff.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs findbugs, and returns an error code if there are new warnings.
+
+Other options
+  --only-analyze used to analyze only the classes you are interested in.
+  --release-build used to analyze the classes in the out/Release directory.
+  --findbugs-args used to pass in other findbugs options.
+
+Run
+  $CHROMIUM_SRC/third_party/findbugs/bin/findbugs -textui for details.
+
+"""
+
+import argparse
+import os
+import sys
+
+import devil_chromium
+from devil.utils import run_tests_helper
+
+from pylib.constants import host_paths
+from pylib.utils import findbugs
+
+_DEFAULT_BASE_DIR = os.path.join(
+    host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'findbugs_filter')
+
+sys.path.append(
+    os.path.join(host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'gyp'))
+from util import build_utils # pylint: disable=import-error
+
+
+def main():
+  parser = argparse.ArgumentParser()
+
+  parser.add_argument(
+      '-v', '--verbose', action='count', help='Enable verbose logging.')
+  parser.add_argument(
+      '-a', '--auxclasspath', default=None, dest='auxclasspath',
+      help='Set aux classpath for analysis.')
+  parser.add_argument(
+      '--auxclasspath-gyp', dest='auxclasspath_gyp',
+      help='A gyp list containing the aux classpath for analysis')
+  parser.add_argument(
+      '-o', '--only-analyze', default=None,
+      dest='only_analyze', help='Only analyze the given classes and packages.')
+  parser.add_argument(
+      '-e', '--exclude', default=None, dest='exclude',
+      help='Exclude bugs matching given filter.')
+  parser.add_argument(
+      '-l', '--release-build', action='store_true', dest='release_build',
+      help='Analyze release build instead of debug.')
+  parser.add_argument(
+      '-f', '--findbug-args', default=None, dest='findbug_args',
+      help='Additional findbug arguments.')
+  parser.add_argument(
+      '-b', '--base-dir', default=_DEFAULT_BASE_DIR,
+      dest='base_dir', help='Base directory for configuration file.')
+  parser.add_argument(
+      '--output-file', dest='output_file',
+      help='Path to save the output to.')
+  parser.add_argument(
+      '--stamp', help='Path to touch on success.')
+  parser.add_argument(
+      '--depfile', help='Path to the depfile. This must be specified as the '
+                        "action's first output.")
+
+  parser.add_argument(
+      'jar_paths', metavar='JAR_PATH', nargs='+',
+      help='JAR file to analyze')
+
+  args = parser.parse_args(build_utils.ExpandFileArgs(sys.argv[1:]))
+
+  run_tests_helper.SetLogLevel(args.verbose)
+
+  devil_chromium.Initialize()
+
+  if args.auxclasspath:
+    args.auxclasspath = args.auxclasspath.split(':')
+  elif args.auxclasspath_gyp:
+    args.auxclasspath = build_utils.ParseGypList(args.auxclasspath_gyp)
+
+  if args.base_dir:
+    if not args.exclude:
+      args.exclude = os.path.join(args.base_dir, 'findbugs_exclude.xml')
+
+  findbugs_command, findbugs_warnings = findbugs.Run(
+      args.exclude, args.only_analyze, args.auxclasspath,
+      args.output_file, args.findbug_args, args.jar_paths)
+
+  if findbugs_warnings:
+    print
+    print '*' * 80
+    print 'FindBugs run via:'
+    print findbugs_command
+    print
+    print 'FindBugs reported the following issues:'
+    for warning in sorted(findbugs_warnings):
+      print str(warning)
+    print '*' * 80
+    print
+  else:
+    if args.depfile:
+      build_utils.WriteDepfile(
+          args.depfile,
+          build_utils.GetPythonDependencies() + (args.auxclasspath or [])
+              + args.jar_paths)
+    if args.stamp:
+      build_utils.Touch(args.stamp)
+
+  return len(findbugs_warnings)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
+
diff --git a/build/android/findbugs_filter/findbugs_exclude.xml b/build/android/findbugs_filter/findbugs_exclude.xml
new file mode 100644
index 0000000..320a2bf
--- /dev/null
+++ b/build/android/findbugs_filter/findbugs_exclude.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright (c) 2012 The Chromium Authors. All rights reserved.
+  Use of this source code is governed by a BSD-style license that can be
+  found in the LICENSE file.
+-->
+
+<!--
+Documentation: http://findbugs.sourceforge.net/manual/filter.html
+In particular, ~ at the start of a string means it's a regex.
+-->
+<FindBugsFilter>
+  <!-- Skip the generated resource classes (including nested classes). -->
+  <Match>
+    <Class name="~.*\.R(\$\w+)?" />
+  </Match>
+  <!-- Skip the generated Manifest class (including nested classes). -->
+  <Match>
+    <Class name="~.*\.Manifest(\$\w+)?" />
+  </Match>
+  <Bug pattern="DM_STRING_CTOR" />
+  <!-- Ignore "reliance on default String encoding" warnings, as we're not multi-platform -->
+  <Bug pattern="DM_DEFAULT_ENCODING" />
+</FindBugsFilter>
diff --git a/build/android/generate_emma_html.py b/build/android/generate_emma_html.py
new file mode 100755
index 0000000..9d1d733
--- /dev/null
+++ b/build/android/generate_emma_html.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Aggregates EMMA coverage files to produce html output."""
+
+import fnmatch
+import json
+import optparse
+import os
+import sys
+
+import devil_chromium
+from devil.utils import cmd_helper
+from pylib import constants
+from pylib.constants import host_paths
+
+
+def _GetFilesWithExt(root_dir, ext):
+  """Gets all files with a given extension.
+
+  Args:
+    root_dir: Directory in which to search for files.
+    ext: Extension to look for, without the leading dot.
+
+  Returns:
+    A list of absolute paths to files that match.
+  """
+  files = []
+  for root, _, filenames in os.walk(root_dir):
+    basenames = fnmatch.filter(filenames, '*.' + ext)
+    files.extend([os.path.join(root, basename)
+                  for basename in basenames])
+
+  return files
+
+
+def main():
+  option_parser = optparse.OptionParser()
+  option_parser.add_option('--output', help='HTML output filename.')
+  option_parser.add_option('--coverage-dir', default=None,
+                           help=('Root of the directory in which to search for '
+                                 'coverage data (.ec) files.'))
+  option_parser.add_option('--metadata-dir', default=None,
+                           help=('Root of the directory in which to search for '
+                                 'coverage metadata (.em) files.'))
+  option_parser.add_option('--cleanup', action='store_true',
+                           help=('If set, removes coverage files generated at '
+                                 'runtime.'))
+  options, _ = option_parser.parse_args()
+
+  devil_chromium.Initialize()
+
+  if not (options.coverage_dir and options.metadata_dir and options.output):
+    option_parser.error('One or more mandatory options are missing.')
+
+  coverage_files = _GetFilesWithExt(options.coverage_dir, 'ec')
+  metadata_files = _GetFilesWithExt(options.metadata_dir, 'em')
+  # Filter out zero-length files. These are created by emma_instr.py when a
+  # target has no classes matching the coverage filter.
+  metadata_files = [f for f in metadata_files if os.path.getsize(f)]
+  print 'Found coverage files: %s' % str(coverage_files)
+  print 'Found metadata files: %s' % str(metadata_files)
+
+  sources = []
+  for f in metadata_files:
+    sources_file = os.path.splitext(f)[0] + '_sources.txt'
+    with open(sources_file, 'r') as sf:
+      sources.extend(json.load(sf))
+  sources = [os.path.join(host_paths.DIR_SOURCE_ROOT, s) for s in sources]
+  print 'Sources: %s' % sources
+
+  input_args = []
+  for f in coverage_files + metadata_files:
+    input_args.append('-in')
+    input_args.append(f)
+
+  output_args = ['-Dreport.html.out.file', options.output]
+  source_args = ['-sp', ','.join(sources)]
+
+  exit_code = cmd_helper.RunCmd(
+      ['java', '-cp',
+       os.path.join(constants.ANDROID_SDK_ROOT, 'tools', 'lib', 'emma.jar'),
+       'emma', 'report', '-r', 'html']
+      + input_args + output_args + source_args)
+
+  if options.cleanup:
+    for f in coverage_files:
+      os.remove(f)
+
+  # The command tends to exit with status 0 even when it actually failed.
+  if not exit_code and not os.path.exists(options.output):
+    exit_code = 1
+
+  return exit_code
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/gn/zip.py b/build/android/gn/zip.py
new file mode 100755
index 0000000..b80e0a1
--- /dev/null
+++ b/build/android/gn/zip.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Archives a set of files.
+"""
+
+import ast
+import optparse
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'gyp'))
+from util import build_utils
+
+def main():
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_option('--inputs', help='List of files to archive.')
+  parser.add_option('--output', help='Path to output archive.')
+  parser.add_option('--base-dir',
+                    help='If provided, the paths in the archive will be '
+                    'relative to this directory', default='.')
+
+  options, _ = parser.parse_args()
+
+  inputs = ast.literal_eval(options.inputs)
+  output = options.output
+  base_dir = options.base_dir
+
+  build_utils.DoZip(inputs, output, base_dir)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        build_utils.GetPythonDependencies())
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/gyp/aidl.py b/build/android/gyp/aidl.py
new file mode 100755
index 0000000..85ceeae
--- /dev/null
+++ b/build/android/gyp/aidl.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Invokes Android's aidl
+"""
+
+import optparse
+import os
+import re
+import sys
+import zipfile
+
+from util import build_utils
+
+
+def main(argv):
+  option_parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(option_parser)
+  option_parser.add_option('--aidl-path', help='Path to the aidl binary.')
+  option_parser.add_option('--imports', help='Files to import.')
+  option_parser.add_option('--includes',
+                           help='Directories to add as import search paths.')
+  option_parser.add_option('--srcjar', help='Path for srcjar output.')
+  options, args = option_parser.parse_args(argv[1:])
+
+  with build_utils.TempDir() as temp_dir:
+    for f in args:
+      classname = os.path.splitext(os.path.basename(f))[0]
+      output = os.path.join(temp_dir, classname + '.java')
+      aidl_cmd = [options.aidl_path]
+      aidl_cmd += [
+        '-p' + s for s in build_utils.ParseGypList(options.imports)
+      ]
+      if options.includes is not None:
+        aidl_cmd += [
+          '-I' + s for s in build_utils.ParseGypList(options.includes)
+        ]
+      aidl_cmd += [
+        f,
+        output
+      ]
+      build_utils.CheckOutput(aidl_cmd)
+
+    with zipfile.ZipFile(options.srcjar, 'w') as srcjar:
+      for path in build_utils.FindInDirectory(temp_dir, '*.java'):
+        with open(path) as fileobj:
+          data = fileobj.read()
+        pkg_name = re.search(r'^\s*package\s+(.*?)\s*;', data, re.M).group(1)
+        arcname = '%s/%s' % (pkg_name.replace('.', '/'), os.path.basename(path))
+        srcjar.writestr(arcname, data)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        build_utils.GetPythonDependencies())
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/build/android/gyp/ant.py b/build/android/gyp/ant.py
new file mode 100755
index 0000000..5394b9e
--- /dev/null
+++ b/build/android/gyp/ant.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""An Ant wrapper that suppresses useless Ant output.
+
+Ant build scripts output "BUILD SUCCESSFUL" and build timing at the end of
+every build. In the Android build, this just adds a lot of useless noise to the
+build output. This script forwards its arguments to ant, and prints Ant's
+output up until the BUILD SUCCESSFUL line.
+
+Also, when a command fails, this script will re-run that ant command with the
+'-verbose' argument so that the failure is easier to debug.
+"""
+
+import optparse
+import sys
+import traceback
+
+from util import build_utils
+
+
+def main(argv):
+  option_parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(option_parser)
+  options, args = option_parser.parse_args(argv[1:])
+
+  try:
+    stdout = build_utils.CheckOutput(['ant'] + args)
+  except build_utils.CalledProcessError:
+    # It is very difficult to diagnose ant failures without the '-verbose'
+    # argument. So, when an ant command fails, re-run it with '-verbose' so that
+    # the cause of the failure is easier to identify.
+    verbose_args = ['-verbose'] + [a for a in args if a != '-quiet']
+    try:
+      stdout = build_utils.CheckOutput(['ant'] + verbose_args)
+    except build_utils.CalledProcessError:
+      traceback.print_exc()
+      sys.exit(1)
+
+    # If this did sys.exit(1), building again would succeed (which would be
+    # awkward). Instead, just print a big warning.
+    build_utils.PrintBigWarning(
+        'This is unexpected. `ant ' + ' '.join(args) + '` failed. ' +
+        'But, running `ant ' + ' '.join(verbose_args) + '` passed.')
+
+  stdout = stdout.strip().split('\n')
+  for line in stdout:
+    if line.strip() == 'BUILD SUCCESSFUL':
+      break
+    print line
+
+  if options.depfile:
+    assert '-buildfile' in args
+    ant_buildfile = args[args.index('-buildfile') + 1]
+
+    build_utils.WriteDepfile(
+        options.depfile,
+        [ant_buildfile] + build_utils.GetPythonDependencies())
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/build/android/gyp/apk_install.py b/build/android/gyp/apk_install.py
new file mode 100755
index 0000000..9c90763
--- /dev/null
+++ b/build/android/gyp/apk_install.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Installs an APK.
+
+"""
+
+import optparse
+import os
+import re
+import sys
+
+from util import build_device
+from util import build_utils
+from util import md5_check
+
+BUILD_ANDROID_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..'))
+sys.path.append(BUILD_ANDROID_DIR)
+
+import devil_chromium
+from devil.android import apk_helper
+from pylib import constants
+
+
+def GetNewMetadata(device, apk_package):
+  """Gets the metadata on the device for the apk_package apk."""
+  output = device.RunShellCommand('ls -l /data/app/')
+  # Matches lines like:
+  # -rw-r--r-- system   system    7376582 2013-04-19 16:34 \
+  # org.chromium.chrome.apk
+  # -rw-r--r-- system   system    7376582 2013-04-19 16:34 \
+  # org.chromium.chrome-1.apk
+  apk_matcher = lambda s: re.match('.*%s(-[0-9]*)?(\.apk)?$' % apk_package, s)
+  matches = filter(apk_matcher, output)
+  return matches[0] if matches else None
+
+def HasInstallMetadataChanged(device, apk_package, metadata_path):
+  """Checks if the metadata on the device for apk_package has changed."""
+  if not os.path.exists(metadata_path):
+    return True
+
+  with open(metadata_path, 'r') as expected_file:
+    return expected_file.read() != device.GetInstallMetadata(apk_package)
+
+
+def RecordInstallMetadata(device, apk_package, metadata_path):
+  """Records the metadata from the device for apk_package."""
+  metadata = GetNewMetadata(device, apk_package)
+  if not metadata:
+    raise Exception('APK install failed unexpectedly.')
+
+  with open(metadata_path, 'w') as outfile:
+    outfile.write(metadata)
+
+
+def main():
+  parser = optparse.OptionParser()
+  parser.add_option('--apk-path',
+      help='Path to .apk to install.')
+  parser.add_option('--split-apk-path',
+      help='Path to .apk splits (can specify multiple times, causes '
+      '--install-multiple to be used).',
+      action='append')
+  parser.add_option('--android-sdk-tools',
+      help='Path to the Android SDK build tools folder. ' +
+           'Required when using --split-apk-path.')
+  parser.add_option('--install-record',
+      help='Path to install record (touched only when APK is installed).')
+  parser.add_option('--build-device-configuration',
+      help='Path to build device configuration.')
+  parser.add_option('--stamp',
+      help='Path to touch on success.')
+  parser.add_option('--configuration-name',
+      help='The build CONFIGURATION_NAME')
+  parser.add_option('--output-directory',
+      help='The output directory.')
+  options, _ = parser.parse_args()
+
+  constants.SetBuildType(options.configuration_name)
+
+  devil_chromium.Initialize(
+      output_directory=os.path.abspath(options.output_directory))
+
+  device = build_device.GetBuildDeviceFromPath(
+      options.build_device_configuration)
+  if not device:
+    return
+
+  serial_number = device.GetSerialNumber()
+  apk_package = apk_helper.GetPackageName(options.apk_path)
+
+  metadata_path = '%s.%s.device.time.stamp' % (options.apk_path, serial_number)
+
+  # If the APK on the device does not match the one that was last installed by
+  # the build, then the APK has to be installed (regardless of the md5 record).
+  force_install = HasInstallMetadataChanged(device, apk_package, metadata_path)
+
+
+  def Install():
+    if options.split_apk_path:
+      device.InstallSplitApk(options.apk_path, options.split_apk_path)
+    else:
+      device.Install(options.apk_path, reinstall=True)
+
+    RecordInstallMetadata(device, apk_package, metadata_path)
+    build_utils.Touch(options.install_record)
+
+
+  record_path = '%s.%s.md5.stamp' % (options.apk_path, serial_number)
+  md5_check.CallAndRecordIfStale(
+      Install,
+      record_path=record_path,
+      input_paths=[options.apk_path],
+      force=force_install)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/gyp/apk_obfuscate.py b/build/android/gyp/apk_obfuscate.py
new file mode 100755
index 0000000..99b6176
--- /dev/null
+++ b/build/android/gyp/apk_obfuscate.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Generates the obfuscated jar and test jar for an apk.
+
+If proguard is not enabled or 'Release' is not in the configuration name,
+obfuscation will be a no-op.
+"""
+
+import json
+import optparse
+import os
+import sys
+import tempfile
+
+from util import build_utils
+from util import proguard_util
+
+
+_PROGUARD_KEEP_CLASS = '''-keep class %s {
+  *;
+}
+'''
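+
+# Illustrative expansion (hypothetical class name): a main-dex-list entry
+# 'org/chromium/base/Foo.class' is rewritten by _PossibleMultidexConfig into
+#   -keep class org.chromium.base.Foo {
+#     *;
+#   }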
+
+
+def ParseArgs(argv):
+  parser = optparse.OptionParser()
+  parser.add_option('--android-sdk', help='path to the Android SDK folder')
+  parser.add_option('--android-sdk-tools',
+                    help='path to the Android SDK build tools folder')
+  parser.add_option('--android-sdk-jar',
+                    help='path to Android SDK\'s android.jar')
+  parser.add_option('--proguard-jar-path',
+                    help='Path to proguard.jar in the sdk')
+  parser.add_option('--input-jars-paths',
+                    help='Path to jars to include in obfuscated jar')
+
+  parser.add_option('--proguard-configs',
+                    help='Paths to proguard config files')
+
+  parser.add_option('--configuration-name',
+                    help='Gyp configuration name (i.e. Debug, Release)')
+
+  parser.add_option('--debug-build-proguard-enabled', action='store_true',
+                    help='--proguard-enabled takes effect only on release '
+                         'builds; this flag enables proguard on debug '
+                         'builds.')
+  parser.add_option('--proguard-enabled', action='store_true',
+                    help='Set if proguard is enabled for this target.')
+
+  parser.add_option('--obfuscated-jar-path',
+                    help='Output path for obfuscated jar.')
+
+  parser.add_option('--testapp', action='store_true',
+                    help='Set this if building an instrumentation test apk')
+  parser.add_option('--tested-apk-obfuscated-jar-path',
+                    help='Path to the obfuscated jar of the tested apk')
+  parser.add_option('--test-jar-path',
+                    help='Output path for jar containing all the test apk\'s '
+                    'code.')
+
+  parser.add_option('--stamp', help='File to touch on success')
+
+  parser.add_option('--main-dex-list-path',
+                    help='The list of classes to retain in the main dex. '
+                         'These will not be obfuscated.')
+  parser.add_option('--multidex-configuration-path',
+                    help='A JSON file containing multidex build configuration.')
+  parser.add_option('--verbose', '-v', action='store_true',
+                    help='Print all proguard output')
+
+  (options, args) = parser.parse_args(argv)
+
+  if args:
+    parser.error('No positional arguments should be given. ' + str(args))
+
+  # Check that required options have been provided.
+  required_options = (
+      'android_sdk',
+      'android_sdk_tools',
+      'android_sdk_jar',
+      'proguard_jar_path',
+      'input_jars_paths',
+      'configuration_name',
+      'obfuscated_jar_path',
+      )
+
+  if options.testapp:
+    required_options += (
+        'test_jar_path',
+        )
+
+  build_utils.CheckOptions(options, parser, required=required_options)
+  return options, args
+
+
+def DoProguard(options):
+  proguard = proguard_util.ProguardCmdBuilder(options.proguard_jar_path)
+  proguard.outjar(options.obfuscated_jar_path)
+
+  input_jars = build_utils.ParseGypList(options.input_jars_paths)
+
+  exclude_paths = []
+  configs = build_utils.ParseGypList(options.proguard_configs)
+  if options.tested_apk_obfuscated_jar_path:
+    # configs should only contain the process_resources.py generated config.
+    assert len(configs) == 1, (
+        'test apks should not have custom proguard configs: ' + str(configs))
+    proguard.tested_apk_info(options.tested_apk_obfuscated_jar_path + '.info')
+
+  proguard.libraryjars([options.android_sdk_jar])
+  proguard_injars = [p for p in input_jars if p not in exclude_paths]
+  proguard.injars(proguard_injars)
+
+  multidex_config = _PossibleMultidexConfig(options)
+  if multidex_config:
+    configs.append(multidex_config)
+
+  proguard.configs(configs)
+  proguard.verbose(options.verbose)
+  proguard.CheckOutput()
+
+
+def _PossibleMultidexConfig(options):
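+  # Returns the path of a generated proguard config that keeps every class on
+  # the main dex list, or None when multidex is not enabled for this build.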
+  if not options.multidex_configuration_path:
+    return None
+
+  with open(options.multidex_configuration_path) as multidex_config_file:
+    multidex_config = json.loads(multidex_config_file.read())
+
+  if not (multidex_config.get('enabled') and options.main_dex_list_path):
+    return None
+
+  main_dex_list_config = ''
+  with open(options.main_dex_list_path) as main_dex_list:
+    for clazz in (l.strip() for l in main_dex_list):
+      if clazz.endswith('.class'):
+        clazz = clazz[:-len('.class')]
+      clazz = clazz.replace('/', '.')
+      main_dex_list_config += (_PROGUARD_KEEP_CLASS % clazz)
+  with tempfile.NamedTemporaryFile(
+      delete=False,
+      dir=os.path.dirname(options.main_dex_list_path),
+      prefix='main_dex_list_proguard',
+      suffix='.flags') as main_dex_config_file:
+    main_dex_config_file.write(main_dex_list_config)
+  return main_dex_config_file.name
+
+
+def main(argv):
+  options, _ = ParseArgs(argv)
+
+  input_jars = build_utils.ParseGypList(options.input_jars_paths)
+
+  if options.testapp:
+    dependency_class_filters = [
+        '*R.class', '*R$*.class', '*Manifest.class', '*BuildConfig.class']
+    build_utils.MergeZips(
+        options.test_jar_path, input_jars, dependency_class_filters)
+
+  if ((options.configuration_name == 'Release' and options.proguard_enabled) or
+     (options.configuration_name == 'Debug' and
+      options.debug_build_proguard_enabled)):
+    DoProguard(options)
+  else:
+    output_files = [
+        options.obfuscated_jar_path,
+        options.obfuscated_jar_path + '.info',
+        options.obfuscated_jar_path + '.dump',
+        options.obfuscated_jar_path + '.seeds',
+        options.obfuscated_jar_path + '.usage',
+        options.obfuscated_jar_path + '.mapping']
+    for f in output_files:
+      if os.path.exists(f):
+        os.remove(f)
+      build_utils.Touch(f)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/apkbuilder.py b/build/android/gyp/apkbuilder.py
new file mode 100755
index 0000000..ef5f2cf
--- /dev/null
+++ b/build/android/gyp/apkbuilder.py
@@ -0,0 +1,306 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Adds the code parts to a resource APK."""
+
+import argparse
+import itertools
+import os
+import shutil
+import sys
+import zipfile
+
+from util import build_utils
+
+
+# Taken from aapt's Package.cpp:
+_NO_COMPRESS_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.gif', '.wav', '.mp2',
+                           '.mp3', '.ogg', '.aac', '.mpg', '.mpeg', '.mid',
+                           '.midi', '.smf', '.jet', '.rtttl', '.imy', '.xmf',
+                           '.mp4', '.m4a', '.m4v', '.3gp', '.3gpp', '.3g2',
+                           '.3gpp2', '.amr', '.awb', '.wma', '.wmv', '.webm')
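+# Assets with these extensions are stored uncompressed, matching aapt: they
+# are already-compressed formats, and storing them lets them be read in place.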
+
+
+def _ParseArgs(args):
+  parser = argparse.ArgumentParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_argument('--assets',
+                      help='GYP-list of files to add as assets in the form '
+                           '"srcPath:zipPath", where ":zipPath" is optional.',
+                      default='[]')
+  parser.add_argument('--write-asset-list',
+                      action='store_true',
+                      help='Whether to create an assets/assets_list file.')
+  parser.add_argument('--uncompressed-assets',
+                      help='Same as --assets, except disables compression.',
+                      default='[]')
+  parser.add_argument('--resource-apk',
+                      help='An .ap_ file built using aapt',
+                      required=True)
+  parser.add_argument('--output-apk',
+                      help='Path to the output file',
+                      required=True)
+  parser.add_argument('--dex-file',
+                      help='Path to the classes.dex to use')
+  parser.add_argument('--native-libs',
+                      action='append',
+                      help='GYP-list of native libraries to include. '
+                           'Can be specified multiple times.',
+                      default=[])
+  parser.add_argument('--secondary-native-libs',
+                      action='append',
+                      help='GYP-list of native libraries for secondary '
+                           'android-abi. Can be specified multiple times.',
+                      default=[])
+  parser.add_argument('--android-abi',
+                      help='Android architecture to use for native libraries')
+  parser.add_argument('--secondary-android-abi',
+                      help='The secondary Android architecture to use for '
+                           'secondary native libraries')
+  parser.add_argument('--native-lib-placeholders',
+                      help='GYP-list of native library placeholders to add.',
+                      default='[]')
+  parser.add_argument('--emma-device-jar',
+                      help='Path to emma_device.jar to include.')
+  parser.add_argument('--uncompress-shared-libraries',
+                      action='store_true',
+                      help='Uncompress shared libraries')
+  options = parser.parse_args(args)
+  options.assets = build_utils.ParseGypList(options.assets)
+  options.uncompressed_assets = build_utils.ParseGypList(
+      options.uncompressed_assets)
+  options.native_lib_placeholders = build_utils.ParseGypList(
+      options.native_lib_placeholders)
+  all_libs = []
+  for gyp_list in options.native_libs:
+    all_libs.extend(build_utils.ParseGypList(gyp_list))
+  options.native_libs = all_libs
+  secondary_libs = []
+  for gyp_list in options.secondary_native_libs:
+    secondary_libs.extend(build_utils.ParseGypList(gyp_list))
+  options.secondary_native_libs = secondary_libs
+
+
+  if not options.android_abi and (options.native_libs or
+                                  options.native_lib_placeholders):
+    raise Exception('Must specify --android-abi with --native-libs')
+  if not options.secondary_android_abi and options.secondary_native_libs:
+    raise Exception('Must specify --secondary-android-abi with'
+                    ' --secondary-native-libs')
+  return options
+
+
+def _SplitAssetPath(path):
+  """Returns (src, dest) given an asset path in the form src[:dest]."""
+  path_parts = path.split(':')
+  src_path = path_parts[0]
+  if len(path_parts) > 1:
+    dest_path = path_parts[1]
+  else:
+    dest_path = os.path.basename(src_path)
+  return src_path, dest_path
+
+
+def _ExpandPaths(paths):
+  """Converts src:dst into tuples and enumerates files within directories.
+
+  Args:
+    paths: Paths in the form "src_path:dest_path"
+
+  Returns:
+    A list of (src_path, dest_path) tuples sorted by dest_path (for stable
+    ordering within output .apk).
+  """
+  ret = []
+  for path in paths:
+    src_path, dest_path = _SplitAssetPath(path)
+    if os.path.isdir(src_path):
+      for f in build_utils.FindInDirectory(src_path, '*'):
+        ret.append((f, os.path.join(dest_path, f[len(src_path) + 1:])))
+    else:
+      ret.append((src_path, dest_path))
+  ret.sort(key=lambda t: t[1])
+  return ret
+
+
+def _AddAssets(apk, path_tuples, disable_compression=False):
+  """Adds the given paths to the apk.
+
+  Args:
+    apk: ZipFile to write to.
+    path_tuples: List of (src_path, dest_path) tuples to add.
+    disable_compression: Whether to disable compression.
+  """
+  # Group all uncompressed assets together in the hope that it will increase
+  # locality of mmap'ed files.
+  for target_compress in (False, True):
+    for src_path, dest_path in path_tuples:
+
+      compress = not disable_compression and (
+          os.path.splitext(src_path)[1] not in _NO_COMPRESS_EXTENSIONS)
+      if target_compress == compress:
+        apk_path = 'assets/' + dest_path
+        try:
+          apk.getinfo(apk_path)
+          # Should never happen since write_build_config.py handles merging.
+          raise Exception('Multiple targets specified the asset path: %s' %
+                          apk_path)
+        except KeyError:
+          build_utils.AddToZipHermetic(apk, apk_path, src_path=src_path,
+                                       compress=compress)
+
+
+def _CreateAssetsList(path_tuples):
+  """Returns a newline-separated list of asset paths for the given paths."""
+  dests = sorted(t[1] for t in path_tuples)
+  return '\n'.join(dests) + '\n'
+
+
+def _AddNativeLibraries(out_apk, native_libs, android_abi, uncompress):
+  """Add native libraries to APK."""
+  for path in native_libs:
+    basename = os.path.basename(path)
+    apk_path = 'lib/%s/%s' % (android_abi, basename)
+
+    compress = None
+    if (uncompress and os.path.splitext(basename)[1] == '.so'):
+      compress = False
+
+    build_utils.AddToZipHermetic(out_apk,
+                                 apk_path,
+                                 src_path=path,
+                                 compress=compress)
+
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+  options = _ParseArgs(args)
+
+  native_libs = sorted(options.native_libs)
+
+  input_paths = [options.resource_apk, __file__] + native_libs
+
+  secondary_native_libs = []
+  if options.secondary_native_libs:
+    secondary_native_libs = sorted(options.secondary_native_libs)
+    input_paths += secondary_native_libs
+
+  if options.dex_file:
+    input_paths.append(options.dex_file)
+
+  if options.emma_device_jar:
+    input_paths.append(options.emma_device_jar)
+
+  input_strings = [options.android_abi,
+                   options.native_lib_placeholders,
+                   options.uncompress_shared_libraries]
+
+  if options.secondary_android_abi:
+    input_strings.append(options.secondary_android_abi)
+
+  _assets = _ExpandPaths(options.assets)
+  _uncompressed_assets = _ExpandPaths(options.uncompressed_assets)
+
+  for src_path, dest_path in itertools.chain(_assets, _uncompressed_assets):
+    input_paths.append(src_path)
+    input_strings.append(dest_path)
+
+  def on_stale_md5():
+    tmp_apk = options.output_apk + '.tmp'
+    try:
+      # TODO(agrieve): It would be more efficient to combine this step
+      # with finalize_apk(), which sometimes aligns and uncompresses the
+      # native libraries.
+      with zipfile.ZipFile(options.resource_apk) as resource_apk, \
+           zipfile.ZipFile(tmp_apk, 'w', zipfile.ZIP_DEFLATED) as out_apk:
+        def copy_resource(zipinfo):
+          compress = zipinfo.compress_type != zipfile.ZIP_STORED
+          build_utils.AddToZipHermetic(out_apk, zipinfo.filename,
+                                       data=resource_apk.read(zipinfo.filename),
+                                       compress=compress)
+
+        # Make assets come before resources in order to maintain the same file
+        # ordering as GYP / aapt. http://crbug.com/561862
+        resource_infos = resource_apk.infolist()
+
+        # 1. AndroidManifest.xml
+        assert resource_infos[0].filename == 'AndroidManifest.xml'
+        copy_resource(resource_infos[0])
+
+        # 2. Assets
+        if options.write_asset_list:
+          data = _CreateAssetsList(
+              itertools.chain(_assets, _uncompressed_assets))
+          build_utils.AddToZipHermetic(out_apk, 'assets/assets_list', data=data)
+
+        _AddAssets(out_apk, _assets, disable_compression=False)
+        _AddAssets(out_apk, _uncompressed_assets, disable_compression=True)
+
+        # 3. Dex files
+        if options.dex_file and options.dex_file.endswith('.zip'):
+          with zipfile.ZipFile(options.dex_file, 'r') as dex_zip:
+            for dex in (d for d in dex_zip.namelist() if d.endswith('.dex')):
+              build_utils.AddToZipHermetic(out_apk, dex, data=dex_zip.read(dex))
+        elif options.dex_file:
+          build_utils.AddToZipHermetic(out_apk, 'classes.dex',
+                                       src_path=options.dex_file)
+
+        # 4. Native libraries.
+        _AddNativeLibraries(out_apk,
+                            native_libs,
+                            options.android_abi,
+                            options.uncompress_shared_libraries)
+
+        if options.secondary_android_abi:
+          _AddNativeLibraries(out_apk,
+                              secondary_native_libs,
+                              options.secondary_android_abi,
+                              options.uncompress_shared_libraries)
+
+        for name in sorted(options.native_lib_placeholders):
+          # Empty libs files are ignored by md5check, but rezip requires them
+          # to be empty in order to identify them as placeholders.
+          apk_path = 'lib/%s/%s' % (options.android_abi, name)
+          build_utils.AddToZipHermetic(out_apk, apk_path, data='')
+
+        # 5. Resources
+        for info in resource_infos[1:]:
+          copy_resource(info)
+
+        # 6. Java resources. Used only when coverage is enabled, so order
+        # doesn't matter.
+        if options.emma_device_jar:
+          # Add EMMA Java resources to APK.
+          with zipfile.ZipFile(options.emma_device_jar, 'r') as emma_device_jar:
+            for apk_path in emma_device_jar.namelist():
+              apk_path_lower = apk_path.lower()
+              if apk_path_lower.startswith('meta-inf/'):
+                continue
+
+              if apk_path_lower.endswith('/'):
+                continue
+
+              if apk_path_lower.endswith('.class'):
+                continue
+
+              build_utils.AddToZipHermetic(out_apk, apk_path,
+                                           data=emma_device_jar.read(apk_path))
+
+      shutil.move(tmp_apk, options.output_apk)
+    finally:
+      if os.path.exists(tmp_apk):
+        os.unlink(tmp_apk)
+
+  build_utils.CallAndWriteDepfileIfStale(
+      on_stale_md5,
+      options,
+      input_paths=input_paths,
+      input_strings=input_strings,
+      output_paths=[options.output_apk])
+
+
+if __name__ == '__main__':
+  main(sys.argv[1:])
diff --git a/build/android/gyp/configure_multidex.py b/build/android/gyp/configure_multidex.py
new file mode 100755
index 0000000..9f3b736
--- /dev/null
+++ b/build/android/gyp/configure_multidex.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import argparse
+import json
+import os
+import sys
+
+from util import build_utils
+
+
+_GCC_PREPROCESS_PATH = os.path.join(
+    os.path.dirname(__file__), 'gcc_preprocess.py')
+
+
+def ParseArgs():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--configuration-name', required=True,
+                      help='The build CONFIGURATION_NAME.')
+  parser.add_argument('--enable-multidex', action='store_true', default=False,
+                      help='If passed, multidex may be enabled.')
+  parser.add_argument('--enabled-configurations', default=[],
+                      help='The configuration(s) for which multidex should be '
+                           'enabled. If not specified and --enable-multidex is '
+                           'passed, multidex will be enabled for all '
+                           'configurations.')
+  parser.add_argument('--multidex-configuration-path', required=True,
+                      help='The path to which the multidex configuration JSON '
+                           'should be saved.')
+  parser.add_argument('--multidex-config-java-file', required=True)
+  parser.add_argument('--multidex-config-java-stamp', required=True)
+  parser.add_argument('--multidex-config-java-template', required=True)
+
+  args = parser.parse_args()
+
+  if args.enabled_configurations:
+    args.enabled_configurations = build_utils.ParseGypList(
+        args.enabled_configurations)
+
+  return args
+
+
+def _WriteConfigJson(multidex_enabled, multidex_configuration_path):
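+  # Writes a minimal JSON config, e.g. '{"enabled": true}', which scripts such
+  # as dex.py read back via their --multidex-configuration-path flag.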
+  config = {
+    'enabled': multidex_enabled,
+  }
+
+  with open(multidex_configuration_path, 'w') as f:
+    f.write(json.dumps(config))
+
+
+def _GenerateMultidexConfigJava(multidex_enabled, args):
+  gcc_preprocess_cmd = [
+    sys.executable, _GCC_PREPROCESS_PATH,
+    '--include-path=',
+    '--template', args.multidex_config_java_template,
+    '--stamp', args.multidex_config_java_stamp,
+    '--output', args.multidex_config_java_file,
+  ]
+  if multidex_enabled:
+    gcc_preprocess_cmd += [
+      '--defines', 'ENABLE_MULTIDEX',
+    ]
+
+  build_utils.CheckOutput(gcc_preprocess_cmd)
+
+
+def main():
+  args = ParseArgs()
+
+  multidex_enabled = (
+      args.enable_multidex
+      and (not args.enabled_configurations
+           or args.configuration_name in args.enabled_configurations))
+
+  _WriteConfigJson(multidex_enabled, args.multidex_configuration_path)
+  _GenerateMultidexConfigJava(multidex_enabled, args)
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
+
diff --git a/build/android/gyp/copy_ex.py b/build/android/gyp/copy_ex.py
new file mode 100755
index 0000000..3d7434d
--- /dev/null
+++ b/build/android/gyp/copy_ex.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Copies files to a directory."""
+
+import itertools
+import optparse
+import os
+import shutil
+import sys
+
+from util import build_utils
+
+
+def _get_all_files(base):
+  """Returns a list of all the files in |base|. Each entry is relative to the
+  last path entry of |base|."""
+  result = []
+  dirname = os.path.dirname(base)
+  for root, _, files in os.walk(base):
+    result.extend([os.path.join(root[len(dirname):], f) for f in files])
+  return result
+
+def CopyFile(f, dest, deps):
+  """Copy file or directory and update deps."""
+  if os.path.isdir(f):
+    shutil.copytree(f, os.path.join(dest, os.path.basename(f)))
+    deps.extend(_get_all_files(f))
+  else:
+    shutil.copy(f, dest)
+    deps.append(f)
+
+def DoCopy(options, deps):
+  """Copy files or directories given in options.files and update deps."""
+  files = list(itertools.chain.from_iterable(build_utils.ParseGypList(f)
+                                             for f in options.files))
+
+  for f in files:
+    if os.path.isdir(f) and not options.clear:
+      print('To avoid stale files, you must use --clear when copying '
+            'directories.')
+      sys.exit(-1)
+    CopyFile(f, options.dest, deps)
+
+def DoRenaming(options, deps):
+  """Copy and rename files given in options.renaming_sources and update deps."""
+  src_files = list(itertools.chain.from_iterable(
+                   build_utils.ParseGypList(f)
+                   for f in options.renaming_sources))
+
+  dest_files = list(itertools.chain.from_iterable(
+                    build_utils.ParseGypList(f)
+                    for f in options.renaming_destinations))
+
+  if len(src_files) != len(dest_files):
+    print('Renaming source and destination file lists do not match.')
+    sys.exit(-1)
+
+  for src, dest in itertools.izip(src_files, dest_files):
+    if os.path.isdir(src):
+      print('Renaming a directory is not supported.')
+      sys.exit(-1)
+    else:
+      CopyFile(src, os.path.join(options.dest, dest), deps)
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_option('--dest', help='Directory to copy files to.')
+  parser.add_option('--files', action='append',
+                    help='List of files to copy.')
+  parser.add_option('--clear', action='store_true',
+                    help='If set, the destination directory will be deleted '
+                    'before copying files to it. This is highly recommended to '
+                    'ensure that no stale files are left in the directory.')
+  parser.add_option('--stamp', help='Path to touch on success.')
+  parser.add_option('--renaming-sources',
+                    action='append',
+                    help='List of files to be renamed while being '
+                         'copied to the dest directory.')
+  parser.add_option('--renaming-destinations',
+                    action='append',
+                    help='List of destination file names without paths; the '
+                         'number of elements must match --renaming-sources.')
+
+  options, _ = parser.parse_args(args)
+
+  if options.clear:
+    build_utils.DeleteDirectory(options.dest)
+    build_utils.MakeDirectory(options.dest)
+
+  deps = []
+
+  if options.files:
+    DoCopy(options, deps)
+
+  if options.renaming_sources:
+    DoRenaming(options, deps)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        deps + build_utils.GetPythonDependencies())
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
+
diff --git a/build/android/gyp/create_device_library_links.py b/build/android/gyp/create_device_library_links.py
new file mode 100755
index 0000000..c7f59dd
--- /dev/null
+++ b/build/android/gyp/create_device_library_links.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Creates symlinks to native libraries for an APK.
+
+The native libraries should have previously been pushed to the device (in
+options.target_dir). This script then creates links in an apk's lib/ folder to
+those native libraries.
+"""
+
+import optparse
+import os
+import sys
+
+from util import build_device
+from util import build_utils
+
+BUILD_ANDROID_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..'))
+sys.path.append(BUILD_ANDROID_DIR)
+
+import devil_chromium
+from devil.android import apk_helper
+from pylib import constants
+
+def RunShellCommand(device, cmd):
+  output = device.RunShellCommand(cmd, check_return=True)
+
+  if output:
+    raise Exception(
+        'Unexpected output running command: ' + cmd + '\n' +
+        '\n'.join(output))
+
+
+def CreateSymlinkScript(options):
+  libraries = build_utils.ParseGypList(options.libraries)
+
+  link_cmd = (
+      'rm $APK_LIBRARIES_DIR/%(lib_basename)s > /dev/null 2>&1 \n'
+      'ln -s $STRIPPED_LIBRARIES_DIR/%(lib_basename)s '
+        '$APK_LIBRARIES_DIR/%(lib_basename)s \n'
+      )
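+  # For a library named libfoo.so (hypothetical), each expansion appends:
+  #   rm $APK_LIBRARIES_DIR/libfoo.so > /dev/null 2>&1
+  #   ln -s $STRIPPED_LIBRARIES_DIR/libfoo.so $APK_LIBRARIES_DIR/libfoo.so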
+
+  script = '#!/bin/sh \n'
+
+  for lib in libraries:
+    script += link_cmd % { 'lib_basename': lib }
+
+  with open(options.script_host_path, 'w') as scriptfile:
+    scriptfile.write(script)
+
+
+def TriggerSymlinkScript(options):
+  device = build_device.GetBuildDeviceFromPath(
+      options.build_device_configuration)
+  if not device:
+    return
+
+  apk_package = apk_helper.GetPackageName(options.apk)
+  apk_libraries_dir = '/data/data/%s/lib' % apk_package
+
+  device_dir = os.path.dirname(options.script_device_path)
+  mkdir_cmd = ('if [ ! -e %(dir)s ]; then mkdir -p %(dir)s; fi ' %
+      { 'dir': device_dir })
+  RunShellCommand(device, mkdir_cmd)
+  device.PushChangedFiles([(os.path.abspath(options.script_host_path),
+                            options.script_device_path)])
+
+  trigger_cmd = (
+      'APK_LIBRARIES_DIR=%(apk_libraries_dir)s; '
+      'STRIPPED_LIBRARIES_DIR=%(target_dir)s; '
+      '. %(script_device_path)s'
+      ) % {
+          'apk_libraries_dir': apk_libraries_dir,
+          'target_dir': options.target_dir,
+          'script_device_path': options.script_device_path
+          }
+  RunShellCommand(device, trigger_cmd)
+
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+  parser = optparse.OptionParser()
+  parser.add_option('--apk', help='Path to the apk.')
+  parser.add_option('--script-host-path',
+      help='Path on the host for the symlink script.')
+  parser.add_option('--script-device-path',
+      help='Path on the device to push the created symlink script.')
+  parser.add_option('--libraries',
+      help='List of native libraries.')
+  parser.add_option('--target-dir',
+      help='Device directory that contains the target libraries for symlinks.')
+  parser.add_option('--stamp', help='Path to touch on success.')
+  parser.add_option('--build-device-configuration',
+      help='Path to build device configuration.')
+  parser.add_option('--configuration-name',
+      help='The build CONFIGURATION_NAME')
+  parser.add_option('--output-directory',
+      help='The output directory')
+  options, _ = parser.parse_args(args)
+
+  required_options = ['apk', 'libraries', 'script_host_path',
+      'script_device_path', 'target_dir', 'configuration_name']
+  build_utils.CheckOptions(options, parser, required=required_options)
+  constants.SetBuildType(options.configuration_name)
+
+  devil_chromium.Initialize(
+      output_directory=os.path.abspath(options.output_directory))
+
+  CreateSymlinkScript(options)
+  TriggerSymlinkScript(options)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/create_dist_jar.py b/build/android/gyp/create_dist_jar.py
new file mode 100755
index 0000000..0d31c5d
--- /dev/null
+++ b/build/android/gyp/create_dist_jar.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Merges a list of jars into a single jar."""
+
+import optparse
+import sys
+
+from util import build_utils
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_option('--output', help='Path to output jar.')
+  parser.add_option('--inputs', action='append', help='List of jar inputs.')
+  options, _ = parser.parse_args(args)
+  build_utils.CheckOptions(options, parser, ['output', 'inputs'])
+
+  input_jars = []
+  for inputs_arg in options.inputs:
+    input_jars.extend(build_utils.ParseGypList(inputs_arg))
+
+  build_utils.MergeZips(options.output, input_jars)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        input_jars + build_utils.GetPythonDependencies())
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/create_java_binary_script.py b/build/android/gyp/create_java_binary_script.py
new file mode 100755
index 0000000..2b6553d
--- /dev/null
+++ b/build/android/gyp/create_java_binary_script.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Creates a simple script to run a java "binary".
+
+This creates a script that sets up the java command line for running a java
+jar. This includes correctly setting the classpath and the main class.
+"""
+
+import optparse
+import os
+import sys
+
+from util import build_utils
+
+# The java command must be executed in the current directory because there may
+# be user-supplied paths in the args. The script receives the classpath relative
+# to the directory that the script is written in and then, when run, must
+# recalculate the paths relative to the current directory.
+script_template = """\
+#!/usr/bin/env python
+#
+# This file was generated by build/android/gyp/create_java_binary_script.py
+
+import os
+import sys
+
+self_dir = os.path.dirname(__file__)
+classpath = [{classpath}]
+bootclasspath = [{bootclasspath}]
+extra_program_args = {extra_program_args}
+if os.getcwd() != self_dir:
+  offset = os.path.relpath(self_dir, os.getcwd())
+  classpath = [os.path.join(offset, p) for p in classpath]
+  bootclasspath = [os.path.join(offset, p) for p in bootclasspath]
+java_cmd = ["java"]
+{noverify_flag}
+if bootclasspath:
+    java_cmd.append("-Xbootclasspath/p:" + ":".join(bootclasspath))
+java_cmd.extend(
+    ["-classpath", ":".join(classpath), "-enableassertions", \"{main_class}\"])
+java_cmd.extend(extra_program_args)
+java_cmd.extend(sys.argv[1:])
+os.execvp("java", java_cmd)
+"""
+
+def main(argv):
+  argv = build_utils.ExpandFileArgs(argv)
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_option('--output', help='Output path for executable script.')
+  parser.add_option('--jar-path', help='Path to the main jar.')
+  parser.add_option('--main-class',
+      help='Name of the java class with the "main" entry point.')
+  parser.add_option('--classpath', action='append', default=[],
+      help='Classpath for running the jar.')
+  parser.add_option('--bootclasspath', action='append', default=[],
+      help='zip/jar files to add to bootclasspath for java cmd.')
+  parser.add_option('--noverify', action='store_true',
+      help='JVM flag: noverify.')
+
+  options, extra_program_args = parser.parse_args(argv)
+
+  if options.noverify:
+    noverify_flag = 'java_cmd.append("-noverify")'
+  else:
+    noverify_flag = ''
+
+  classpath = [options.jar_path]
+  for cp_arg in options.classpath:
+    classpath += build_utils.ParseGypList(cp_arg)
+
+  bootclasspath = []
+  for bootcp_arg in options.bootclasspath:
+    bootclasspath += build_utils.ParseGypList(bootcp_arg)
+
+  run_dir = os.path.dirname(options.output)
+  bootclasspath = [os.path.relpath(p, run_dir) for p in bootclasspath]
+  classpath = [os.path.relpath(p, run_dir) for p in classpath]
+
+  with open(options.output, 'w') as script:
+    script.write(script_template.format(
+      classpath=('"%s"' % '", "'.join(classpath)),
+      bootclasspath=('"%s"' % '", "'.join(bootclasspath)
+                     if bootclasspath else ''),
+      main_class=options.main_class,
+      extra_program_args=repr(extra_program_args),
+      noverify_flag=noverify_flag))
+
+  os.chmod(options.output, 0750)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        build_utils.GetPythonDependencies())
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/create_placeholder_files.py b/build/android/gyp/create_placeholder_files.py
new file mode 100755
index 0000000..103e1df
--- /dev/null
+++ b/build/android/gyp/create_placeholder_files.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Create placeholder files.
+"""
+
+import optparse
+import os
+import sys
+
+from util import build_utils
+
+def main():
+  parser = optparse.OptionParser()
+  parser.add_option(
+      '--dest-lib-dir',
+      help='Destination directory to have placeholder files.')
+  parser.add_option(
+      '--stamp',
+      help='Path to touch on success')
+
+  options, args = parser.parse_args()
+
+  for name in args:
+    target_path = os.path.join(options.dest_lib_dir, name)
+    build_utils.Touch(target_path)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+if __name__ == '__main__':
+  sys.exit(main())
+
diff --git a/build/android/gyp/create_standalone_apk.py b/build/android/gyp/create_standalone_apk.py
new file mode 100755
index 0000000..c560599
--- /dev/null
+++ b/build/android/gyp/create_standalone_apk.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Combines stripped libraries and incomplete APK into single standalone APK.
+
+"""
+
+import optparse
+import os
+import shutil
+import sys
+import tempfile
+
+from util import build_utils
+from util import md5_check
+
+def CreateStandaloneApk(options):
+  def DoZip():
+    with tempfile.NamedTemporaryFile(suffix='.zip') as intermediate_file:
+      intermediate_path = intermediate_file.name
+      shutil.copy(options.input_apk_path, intermediate_path)
+      apk_path_abs = os.path.abspath(intermediate_path)
+      build_utils.CheckOutput(
+          ['zip', '-r', '-1', apk_path_abs, 'lib'],
+          cwd=options.libraries_top_dir)
+      shutil.copy(intermediate_path, options.output_apk_path)
+
+  input_paths = [options.input_apk_path, options.libraries_top_dir]
+  record_path = '%s.standalone.stamp' % options.input_apk_path
+  md5_check.CallAndRecordIfStale(
+      DoZip,
+      record_path=record_path,
+      input_paths=input_paths)
+
+
+def main():
+  parser = optparse.OptionParser()
+  parser.add_option('--libraries-top-dir',
+      help='Top directory that contains libraries '
+      '(e.g. library paths are like '
+      'libraries_top_dir/lib/android_app_abi/foo.so).')
+  parser.add_option('--input-apk-path', help='Path to incomplete APK.')
+  parser.add_option('--output-apk-path', help='Path for standalone APK.')
+  parser.add_option('--stamp', help='Path to touch on success.')
+  options, _ = parser.parse_args()
+
+  required_options = ['libraries_top_dir', 'input_apk_path', 'output_apk_path']
+  build_utils.CheckOptions(options, parser, required=required_options)
+
+  CreateStandaloneApk(options)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/gyp/create_test_runner_script.py b/build/android/gyp/create_test_runner_script.py
new file mode 100755
index 0000000..be15dfd
--- /dev/null
+++ b/build/android/gyp/create_test_runner_script.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Creates a script to run an android test using build/android/test_runner.py.
+"""
+
+import argparse
+import os
+import sys
+
+from util import build_utils
+
+SCRIPT_TEMPLATE = """\
+#!/usr/bin/env python
+#
+# This file was generated by build/android/gyp/create_test_runner_script.py
+
+import os
+import subprocess
+import sys
+
+def main():
+  script_directory = os.path.dirname(__file__)
+
+  def ResolvePath(path):
+    \"\"\"Returns an absolute filepath given a path relative to this script.
+    \"\"\"
+    return os.path.abspath(os.path.join(script_directory, path))
+
+  test_runner_path = ResolvePath('{test_runner_path}')
+  test_runner_args = {test_runner_args}
+  test_runner_path_args = {test_runner_path_args}
+  for arg, path in test_runner_path_args:
+    test_runner_args.extend([arg, ResolvePath(path)])
+
+  test_runner_cmd = [test_runner_path] + test_runner_args + sys.argv[1:]
+  return subprocess.call(test_runner_cmd)
+
+if __name__ == '__main__':
+  sys.exit(main())
+"""
+
+def main(args):
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--script-output-path',
+                      help='Output path for executable script.')
+  parser.add_argument('--depfile',
+                      help='Path to the depfile. This must be specified as '
+                           "the action's first output.")
+  parser.add_argument('--test-runner-path',
+                      help='Path to test_runner.py (optional).')
+  # We need to intercept any test runner path arguments and make all
+  # of the paths relative to the output script directory.
+  group = parser.add_argument_group('Test runner path arguments.')
+  group.add_argument('--additional-apk', action='append',
+                     dest='additional_apks', default=[])
+  group.add_argument('--additional-apk-list')
+  group.add_argument('--apk-under-test')
+  group.add_argument('--apk-under-test-incremental-install-script')
+  group.add_argument('--executable-dist-dir')
+  group.add_argument('--isolate-file-path')
+  group.add_argument('--output-directory')
+  group.add_argument('--test-apk')
+  group.add_argument('--test-apk-incremental-install-script')
+  group.add_argument('--coverage-dir')
+  args, test_runner_args = parser.parse_known_args(
+      build_utils.ExpandFileArgs(args))
+
+  def RelativizePathToScript(path):
+    """Returns the path relative to the output script directory."""
+    return os.path.relpath(path, os.path.dirname(args.script_output_path))
+
+  test_runner_path = args.test_runner_path or os.path.join(
+      os.path.dirname(__file__), os.path.pardir, 'test_runner.py')
+  test_runner_path = RelativizePathToScript(test_runner_path)
+
+  test_runner_path_args = []
+  if args.additional_apk_list:
+    args.additional_apks.extend(
+        build_utils.ParseGypList(args.additional_apk_list))
+  if args.additional_apks:
+    test_runner_path_args.extend(
+        ('--additional-apk', RelativizePathToScript(a))
+        for a in args.additional_apks)
+  if args.apk_under_test:
+    test_runner_path_args.append(
+        ('--apk-under-test', RelativizePathToScript(args.apk_under_test)))
+  if args.apk_under_test_incremental_install_script:
+    test_runner_path_args.append(
+        ('--apk-under-test-incremental-install-script',
+         RelativizePathToScript(
+             args.apk_under_test_incremental_install_script)))
+  if args.executable_dist_dir:
+    test_runner_path_args.append(
+        ('--executable-dist-dir',
+         RelativizePathToScript(args.executable_dist_dir)))
+  if args.isolate_file_path:
+    test_runner_path_args.append(
+        ('--isolate-file-path', RelativizePathToScript(args.isolate_file_path)))
+  if args.output_directory:
+    test_runner_path_args.append(
+        ('--output-directory', RelativizePathToScript(args.output_directory)))
+  if args.test_apk:
+    test_runner_path_args.append(
+        ('--test-apk', RelativizePathToScript(args.test_apk)))
+  if args.test_apk_incremental_install_script:
+    test_runner_path_args.append(
+        ('--test-apk-incremental-install-script',
+         RelativizePathToScript(args.test_apk_incremental_install_script)))
+  if args.coverage_dir:
+    test_runner_path_args.append(
+        ('--coverage-dir', RelativizePathToScript(args.coverage_dir)))
+
+  with open(args.script_output_path, 'w') as script:
+    script.write(SCRIPT_TEMPLATE.format(
+        test_runner_path=str(test_runner_path),
+        test_runner_args=str(test_runner_args),
+        test_runner_path_args=str(test_runner_path_args)))
+
+  os.chmod(args.script_output_path, 0750)
+
+  if args.depfile:
+    build_utils.WriteDepfile(
+        args.depfile,
+        build_utils.GetPythonDependencies())
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/create_tool_wrapper.py b/build/android/gyp/create_tool_wrapper.py
new file mode 100755
index 0000000..4433004
--- /dev/null
+++ b/build/android/gyp/create_tool_wrapper.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Creates a simple wrapper script that passes the correct --output-directory.
+"""
+
+import argparse
+import os
+
+_TEMPLATE = """\
+#!/usr/bin/env python
+#
+# This file was generated by //build/android/gyp/create_tool_wrapper.py
+
+import os
+import sys
+
+cmd = '{cmd}'
+args = [os.path.basename(cmd), '{flag_name}={output_directory}'] + sys.argv[1:]
+os.execv(cmd, args)
+"""
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--output', help='Output path for executable script.')
+  parser.add_argument('--target', help='Path to script being wrapped.')
+  parser.add_argument('--output-directory', help='Value for --output-directory')
+  parser.add_argument('--flag-name',
+                      help='Flag name to use instead of --output-directory',
+                      default='--output-directory')
+  args = parser.parse_args()
+
+  with open(args.output, 'w') as script:
+    script.write(_TEMPLATE.format(
+        cmd=os.path.abspath(args.target),
+        flag_name=args.flag_name,
+        output_directory=os.path.abspath(args.output_directory)))
+
+  os.chmod(args.output, 0750)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/build/android/gyp/dex.py b/build/android/gyp/dex.py
new file mode 100755
index 0000000..9400ff2
--- /dev/null
+++ b/build/android/gyp/dex.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import optparse
+import os
+import sys
+import tempfile
+import zipfile
+
+from util import build_utils
+
+
+def _RemoveUnwantedFilesFromZip(dex_path):
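+  # Rewrites the zip at dex_path in place, keeping only its .dex entries.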
+  iz = zipfile.ZipFile(dex_path, 'r')
+  tmp_dex_path = '%s.tmp.zip' % dex_path
+  oz = zipfile.ZipFile(tmp_dex_path, 'w', zipfile.ZIP_DEFLATED)
+  for i in iz.namelist():
+    if i.endswith('.dex'):
+      oz.writestr(i, iz.read(i))
+  os.remove(dex_path)
+  os.rename(tmp_dex_path, dex_path)
+
+
+def _ParseArgs(args):
+  args = build_utils.ExpandFileArgs(args)
+
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_option('--android-sdk-tools',
+                    help='Android sdk build tools directory.')
+  parser.add_option('--output-directory',
+                    default=os.getcwd(),
+                    help='Path to the output build directory.')
+  parser.add_option('--dex-path', help='Dex output path.')
+  parser.add_option('--configuration-name',
+                    help='The build CONFIGURATION_NAME.')
+  parser.add_option('--proguard-enabled',
+                    help='"true" if proguard is enabled.')
+  parser.add_option('--debug-build-proguard-enabled',
+                    help='"true" if proguard is enabled for debug build.')
+  parser.add_option('--proguard-enabled-input-path',
+                    help=('Path to dex in Release mode when proguard '
+                          'is enabled.'))
+  parser.add_option('--no-locals', default='0',
+                    help='Exclude locals list from the dex file.')
+  parser.add_option('--incremental',
+                    action='store_true',
+                    help='Enable incremental builds when possible.')
+  parser.add_option('--inputs', help='A list of additional input paths.')
+  parser.add_option('--excluded-paths',
+                    help='A list of paths to exclude from the dex file.')
+  parser.add_option('--main-dex-list-path',
+                    help='A file containing a list of the classes to '
+                         'include in the main dex.')
+  parser.add_option('--multidex-configuration-path',
+                    help='A JSON file containing multidex build configuration.')
+  parser.add_option('--multi-dex', default=False, action='store_true',
+                    help='Generate multiple dex files.')
+
+  options, paths = parser.parse_args(args)
+
+  required_options = ('android_sdk_tools',)
+  build_utils.CheckOptions(options, parser, required=required_options)
+
+  if options.multidex_configuration_path:
+    with open(options.multidex_configuration_path) as multidex_config_file:
+      multidex_config = json.loads(multidex_config_file.read())
+    options.multi_dex = multidex_config.get('enabled', False)
+
+  if options.multi_dex and not options.main_dex_list_path:
+    logging.warning('multidex cannot be enabled without --main-dex-list-path')
+    options.multi_dex = False
+  elif options.main_dex_list_path and not options.multi_dex:
+    logging.warning('--main-dex-list-path is unused if multidex is not enabled')
+
+  if options.inputs:
+    options.inputs = build_utils.ParseGypList(options.inputs)
+  if options.excluded_paths:
+    options.excluded_paths = build_utils.ParseGypList(options.excluded_paths)
+
+  return options, paths
+
+
+def _AllSubpathsAreClassFiles(paths, changes):
+  for path in paths:
+    if any(not p.endswith('.class') for p in changes.IterChangedSubpaths(path)):
+      return False
+  return True
+
+
+def _DexWasEmpty(paths, changes):
+  for path in paths:
+    if any(p.endswith('.class')
+           for p in changes.old_metadata.IterSubpaths(path)):
+      return False
+  return True
+
+
+def _RunDx(changes, options, dex_cmd, paths):
+  with build_utils.TempDir() as classes_temp_dir:
+    # --multi-dex is incompatible with --incremental.
+    if options.multi_dex:
+      dex_cmd.append('--main-dex-list=%s' % options.main_dex_list_path)
+    else:
+      # Use --incremental when .class files are added or modified (never when
+      # removed).
+      # --incremental tells dx to merge all newly dex'ed .class files with
+      # what already exists in the output dex file (existing classes are
+      # replaced).
+      if options.incremental and changes.AddedOrModifiedOnly():
+        changed_inputs = set(changes.IterChangedPaths())
+        changed_paths = [p for p in paths if p in changed_inputs]
+        if not changed_paths:
+          return
+        # When merging in other dex files, there's no easy way to know if
+        # classes were removed from them.
+        if (_AllSubpathsAreClassFiles(changed_paths, changes)
+            and not _DexWasEmpty(changed_paths, changes)):
+          dex_cmd.append('--incremental')
+          for path in changed_paths:
+            changed_subpaths = set(changes.IterChangedSubpaths(path))
+            # Not a fundamental restriction, but it's the case right now and it
+            # simplifies the logic to assume so.
+            assert changed_subpaths, 'All inputs should be zip files.'
+            build_utils.ExtractAll(path, path=classes_temp_dir,
+                                   predicate=lambda p: p in changed_subpaths)
+          paths = [classes_temp_dir]
+
+    dex_cmd += paths
+    build_utils.CheckOutput(dex_cmd, print_stderr=False)
+
+  if options.dex_path.endswith('.zip'):
+    _RemoveUnwantedFilesFromZip(options.dex_path)
+
+
+def _OnStaleMd5(changes, options, dex_cmd, paths):
+  _RunDx(changes, options, dex_cmd, paths)
+  build_utils.WriteJson(
+      [os.path.relpath(p, options.output_directory) for p in paths],
+      options.dex_path + '.inputs')
+
+
+def main(args):
+  options, paths = _ParseArgs(args)
+  if ((options.proguard_enabled == 'true'
+          and options.configuration_name == 'Release')
+      or (options.debug_build_proguard_enabled == 'true'
+          and options.configuration_name == 'Debug')):
+    paths = [options.proguard_enabled_input_path]
+
+  if options.inputs:
+    paths += options.inputs
+
+  if options.excluded_paths:
+    # Excluded paths are relative to the output directory.
+    exclude_paths = options.excluded_paths
+    paths = [p for p in paths if not
+             os.path.relpath(p, options.output_directory) in exclude_paths]
+
+  input_paths = list(paths)
+
+  dx_binary = os.path.join(options.android_sdk_tools, 'dx')
+  # See http://crbug.com/272064 for context on --force-jumbo.
+  # See https://github.com/android/platform_dalvik/commit/dd140a22d for
+  # --num-threads.
+  dex_cmd = [dx_binary, '--num-threads=8', '--dex', '--force-jumbo',
+             '--output', options.dex_path]
+  if options.no_locals != '0':
+    dex_cmd.append('--no-locals')
+
+  if options.multi_dex:
+    input_paths.append(options.main_dex_list_path)
+    dex_cmd += [
+      '--multi-dex',
+      '--minimal-main-dex',
+    ]
+
+  output_paths = [
+    options.dex_path,
+    options.dex_path + '.inputs',
+  ]
+
+  # An escape hatch to be able to check if incremental dexing is causing
+  # problems.
+  force = int(os.environ.get('DISABLE_INCREMENTAL_DX', 0))
+
+  build_utils.CallAndWriteDepfileIfStale(
+      lambda changes: _OnStaleMd5(changes, options, dex_cmd, paths),
+      options,
+      input_paths=input_paths,
+      input_strings=dex_cmd,
+      output_paths=output_paths,
+      force=force,
+      pass_changes=True)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/emma_instr.py b/build/android/gyp/emma_instr.py
new file mode 100755
index 0000000..9ba6776
--- /dev/null
+++ b/build/android/gyp/emma_instr.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Instruments classes and jar files.
+
+This script corresponds to the 'emma_instr' action in the java build process.
+Depending on whether emma_instrument is set, the 'emma_instr' action will either
+call the instrument command or the copy command.
+
+Possible commands are:
+- instrument_jar: Accepts a jar and instruments it using emma.jar.
+- copy: Called when EMMA coverage is not enabled. This allows us to make
+      this a required step without necessarily instrumenting on every build.
+      Also removes any stale coverage files.
+"""
+
+import collections
+import json
+import os
+import shutil
+import sys
+import tempfile
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
+from pylib.utils import command_option_parser
+
+from util import build_utils
+
+
+def _AddCommonOptions(option_parser):
+  """Adds common options to |option_parser|."""
+  build_utils.AddDepfileOption(option_parser)
+  option_parser.add_option('--input-path',
+                           help=('Path to input file(s). Either the classes '
+                                 'directory, or the path to a jar.'))
+  option_parser.add_option('--output-path',
+                           help=('Path to output final file(s) to. Either the '
+                                 'final classes directory, or the directory in '
+                                 'which to place the instrumented/copied jar.'))
+  option_parser.add_option('--stamp', help='Path to touch when done.')
+  option_parser.add_option('--coverage-file',
+                           help='File to create with coverage metadata.')
+  option_parser.add_option('--sources-list-file',
+                           help='File to create with the list of sources.')
+
+
+def _AddInstrumentOptions(option_parser):
+  """Adds options related to instrumentation to |option_parser|."""
+  _AddCommonOptions(option_parser)
+  option_parser.add_option('--source-dirs',
+                           help='Space separated list of source directories. '
+                                'source-files should not be specified if '
+                                'source-dirs is specified')
+  option_parser.add_option('--source-files',
+                           help='Space separated list of source files. '
+                                'source-dirs should not be specified if '
+                                'source-files is specified')
+  option_parser.add_option('--src-root',
+                           help='Root of the src repository.')
+  option_parser.add_option('--emma-jar',
+                           help='Path to emma.jar.')
+  option_parser.add_option(
+      '--filter-string', default='',
+      help=('Filter string consisting of a list of inclusion/exclusion '
+            'patterns separated with whitespace and/or comma.'))
+
+
+def _RunCopyCommand(_command, options, _, option_parser):
+  """Copies the jar from input to output locations.
+
+  Also removes any old coverage/sources file.
+
+  Args:
+    command: String indicating the command that was received to trigger
+        this function.
+    options: optparse options dictionary.
+    args: List of extra args from optparse.
+    option_parser: optparse.OptionParser object.
+
+  Returns:
+    An exit code.
+  """
+  if not (options.input_path and options.output_path and
+          options.coverage_file and options.sources_list_file):
+    option_parser.error('All arguments are required.')
+
+  if os.path.exists(options.coverage_file):
+    os.remove(options.coverage_file)
+  if os.path.exists(options.sources_list_file):
+    os.remove(options.sources_list_file)
+
+  shutil.copy(options.input_path, options.output_path)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+  if options.depfile:
+    build_utils.WriteDepfile(options.depfile,
+                             build_utils.GetPythonDependencies())
+
+
+def _GetSourceDirsFromSourceFiles(source_files_string):
+  """Returns list of directories for the files in |source_files_string|.
+
+  Args:
+    source_files_string: String generated from GN or GYP containing the list
+      of source files.
+
+  Returns:
+    List of source directories.
+  """
+  source_files = build_utils.ParseGypList(source_files_string)
+  return list(set(os.path.dirname(source_file) for source_file in source_files))
+
+
+def _CreateSourcesListFile(source_dirs, sources_list_file, src_root):
+  """Adds all normalized source directories to |sources_list_file|.
+
+  Args:
+    source_dirs: List of source directories.
+    sources_list_file: File into which to write the JSON list of sources.
+    src_root: Root which sources added to the file should be relative to.
+
+  Returns:
+    An exit code.
+  """
+  src_root = os.path.abspath(src_root)
+  relative_sources = []
+  for s in source_dirs:
+    abs_source = os.path.abspath(s)
+    if abs_source[:len(src_root)] != src_root:
+      print ('Error: found source directory not under repository root: %s %s'
+             % (abs_source, src_root))
+      return 1
+    rel_source = os.path.relpath(abs_source, src_root)
+
+    relative_sources.append(rel_source)
+
+  with open(sources_list_file, 'w') as f:
+    json.dump(relative_sources, f)
+
+
+def _RunInstrumentCommand(_command, options, _, option_parser):
+  """Instruments jar files using EMMA.
+
+  Args:
+    command: String indicating the command that was received to trigger
+        this function.
+    options: optparse options dictionary.
+    args: List of extra args from optparse.
+    option_parser: optparse.OptionParser object.
+
+  Returns:
+    An exit code.
+  """
+  if not (options.input_path and options.output_path and
+          options.coverage_file and options.sources_list_file and
+          (options.source_files or options.source_dirs) and
+          options.src_root and options.emma_jar):
+    option_parser.error('All arguments are required.')
+
+  if os.path.exists(options.coverage_file):
+    os.remove(options.coverage_file)
+  temp_dir = tempfile.mkdtemp()
+  try:
+    cmd = ['java', '-cp', options.emma_jar,
+           'emma', 'instr',
+           '-ip', options.input_path,
+           '-ix', options.filter_string,
+           '-d', temp_dir,
+           '-out', options.coverage_file,
+           '-m', 'fullcopy']
+    build_utils.CheckOutput(cmd)
+
+    # File is not generated when filter_string doesn't match any files.
+    if not os.path.exists(options.coverage_file):
+      build_utils.Touch(options.coverage_file)
+
+    temp_jar_dir = os.path.join(temp_dir, 'lib')
+    jars = os.listdir(temp_jar_dir)
+    if len(jars) != 1:
+      print('Error: multiple output files in: %s' % (temp_jar_dir))
+      return 1
+
+    # Delete output_path first to avoid modifying input_path in the case where
+    # input_path is a hardlink to output_path. http://crbug.com/571642
+    if os.path.exists(options.output_path):
+      os.unlink(options.output_path)
+    shutil.move(os.path.join(temp_jar_dir, jars[0]), options.output_path)
+  finally:
+    shutil.rmtree(temp_dir)
+
+  if options.source_dirs:
+    source_dirs = build_utils.ParseGypList(options.source_dirs)
+  else:
+    source_dirs = _GetSourceDirsFromSourceFiles(options.source_files)
+  _CreateSourcesListFile(source_dirs, options.sources_list_file,
+                         options.src_root)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+  if options.depfile:
+    build_utils.WriteDepfile(options.depfile,
+                             build_utils.GetPythonDependencies())
+
+  return 0
+
+
+CommandFunctionTuple = collections.namedtuple(
+    'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
+VALID_COMMANDS = {
+    'copy': CommandFunctionTuple(_AddCommonOptions,
+                                 _RunCopyCommand),
+    'instrument_jar': CommandFunctionTuple(_AddInstrumentOptions,
+                                           _RunInstrumentCommand),
+}
+
+
+def main():
+  option_parser = command_option_parser.CommandOptionParser(
+      commands_dict=VALID_COMMANDS)
+  command_option_parser.ParseAndExecute(option_parser)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
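
For reference, the sources list that _CreateSourcesListFile emits is just a
JSON array of directories relative to src_root; a minimal standalone sketch of
the derivation, with hypothetical file paths:

import json
import os

# Hypothetical GYP-style source list, already parsed into Python strings.
source_files = ['base/android/java/src/org/chromium/base/Log.java',
                'base/android/java/src/org/chromium/base/TraceEvent.java']
# One entry per unique directory, as in _GetSourceDirsFromSourceFiles()
# (the real code uses list(set(...)); sorted here only for a stable output).
source_dirs = sorted(set(os.path.dirname(f) for f in source_files))
print json.dumps(source_dirs)
# ["base/android/java/src/org/chromium/base"]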
diff --git a/build/android/gyp/finalize_apk.py b/build/android/gyp/finalize_apk.py
new file mode 100755
index 0000000..d71cb8f
--- /dev/null
+++ b/build/android/gyp/finalize_apk.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Signs and zipaligns APK.
+
+"""
+
+import optparse
+import shutil
+import sys
+import tempfile
+
+from util import build_utils
+
+def RenameInflateAndAddPageAlignment(
+    rezip_apk_jar_path, in_zip_file, out_zip_file):
+  rezip_apk_cmd = [
+      'java',
+      '-classpath',
+      rezip_apk_jar_path,
+      'RezipApk',
+      'renamealign',
+      in_zip_file,
+      out_zip_file,
+    ]
+  build_utils.CheckOutput(rezip_apk_cmd)
+
+
+def ReorderAndAlignApk(rezip_apk_jar_path, in_zip_file, out_zip_file):
+  rezip_apk_cmd = [
+      'java',
+      '-classpath',
+      rezip_apk_jar_path,
+      'RezipApk',
+      'reorder',
+      in_zip_file,
+      out_zip_file,
+    ]
+  build_utils.CheckOutput(rezip_apk_cmd)
+
+
+def JarSigner(key_path, key_name, key_passwd, unsigned_path, signed_path):
+  shutil.copy(unsigned_path, signed_path)
+  sign_cmd = [
+      'jarsigner',
+      '-sigalg', 'MD5withRSA',
+      '-digestalg', 'SHA1',
+      '-keystore', key_path,
+      '-storepass', key_passwd,
+      signed_path,
+      key_name,
+    ]
+  build_utils.CheckOutput(sign_cmd)
+
+
+def AlignApk(zipalign_path, package_align, unaligned_path, final_path):
+  align_cmd = [
+      zipalign_path,
+      '-f'
+      ]
+
+  if package_align:
+    align_cmd += ['-p']
+
+  align_cmd += [
+      '4',  # 4 bytes
+      unaligned_path,
+      final_path,
+      ]
+  build_utils.CheckOutput(align_cmd)
+
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_option('--rezip-apk-jar-path',
+                    help='Path to the RezipApk jar file.')
+  parser.add_option('--zipalign-path', help='Path to the zipalign tool.')
+  parser.add_option('--page-align-shared-libraries',
+                    action='store_true',
+                    help='Page align shared libraries.')
+  parser.add_option('--unsigned-apk-path', help='Path to input unsigned APK.')
+  parser.add_option('--final-apk-path',
+      help='Path to output signed and aligned APK.')
+  parser.add_option('--key-path', help='Path to keystore for signing.')
+  parser.add_option('--key-passwd', help='Keystore password')
+  parser.add_option('--key-name', help='Keystore name')
+  parser.add_option('--stamp', help='Path to touch on success.')
+  parser.add_option('--load-library-from-zip', type='int',
+      help='If non-zero, build the APK such that the library can be loaded ' +
+           'directly from the zip file using the crazy linker. The library ' +
+           'will be renamed, uncompressed and page aligned.')
+
+  options, _ = parser.parse_args()
+
+  input_paths = [
+    options.unsigned_apk_path,
+    options.key_path,
+  ]
+
+  if options.load_library_from_zip:
+    input_paths.append(options.rezip_apk_jar_path)
+
+  input_strings = [
+    options.load_library_from_zip,
+    options.key_name,
+    options.key_passwd,
+    options.page_align_shared_libraries,
+  ]
+
+  build_utils.CallAndWriteDepfileIfStale(
+      lambda: FinalizeApk(options),
+      options,
+      record_path=options.unsigned_apk_path + '.finalize.md5.stamp',
+      input_paths=input_paths,
+      input_strings=input_strings,
+      output_paths=[options.final_apk_path])
+
+
+def FinalizeApk(options):
+  with tempfile.NamedTemporaryFile() as signed_apk_path_tmp, \
+      tempfile.NamedTemporaryFile() as apk_to_sign_tmp:
+
+    if options.load_library_from_zip:
+      # We alter the name of the library so that the Android Package Manager
+      # does not extract it into a separate file. This must be done before
+      # signing, as the filename is part of the signed manifest. At the same
+      # time we uncompress the library, which is necessary so that it can be
+      # loaded directly from the APK.
+      # Move the library to a page boundary by adding a page alignment file.
+      apk_to_sign = apk_to_sign_tmp.name
+      RenameInflateAndAddPageAlignment(
+          options.rezip_apk_jar_path, options.unsigned_apk_path, apk_to_sign)
+    else:
+      apk_to_sign = options.unsigned_apk_path
+
+    signed_apk_path = signed_apk_path_tmp.name
+    JarSigner(options.key_path, options.key_name, options.key_passwd,
+              apk_to_sign, signed_apk_path)
+
+    if options.load_library_from_zip:
+      # Reorder the contents of the APK. This re-establishes the canonical
+      # order which means the library will be back at its page aligned location.
+      # This step also aligns uncompressed items to 4 bytes.
+      ReorderAndAlignApk(
+          options.rezip_apk_jar_path, signed_apk_path, options.final_apk_path)
+    else:
+      # Align uncompressed items to 4 bytes
+      AlignApk(options.zipalign_path,
+               options.page_align_shared_libraries,
+               signed_apk_path,
+               options.final_apk_path)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
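
The finalize step boils down to two external commands; a sketch of what
JarSigner() and AlignApk() above end up invoking in the common
(non-crazy-linker) case. All paths are hypothetical; the keystore values shown
are the usual Chromium debug-keystore defaults.

# JarSigner() first copies Foo-unsigned.apk to Foo-signed.apk, then signs
# it in place:
sign_cmd = ['jarsigner', '-sigalg', 'MD5withRSA', '-digestalg', 'SHA1',
            '-keystore', 'chromium-debug.keystore',  # hypothetical path
            '-storepass', 'chromium',
            'Foo-signed.apk', 'chromiumdebugkey']
# AlignApk() then writes the final APK with entries aligned to 4 bytes:
align_cmd = ['zipalign', '-f', '4', 'Foo-signed.apk', 'Foo.apk']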
diff --git a/build/android/gyp/finalize_splits.py b/build/android/gyp/finalize_splits.py
new file mode 100755
index 0000000..a6796bb
--- /dev/null
+++ b/build/android/gyp/finalize_splits.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Signs and zipaligns split APKs.
+
+This script is required only by GYP (not GN).
+"""
+
+import optparse
+import sys
+
+import finalize_apk
+from util import build_utils
+
+def main():
+  parser = optparse.OptionParser()
+  parser.add_option('--zipalign-path', help='Path to the zipalign tool.')
+  parser.add_option('--resource-packaged-apk-path',
+      help='Base path to input .ap_s.')
+  parser.add_option('--base-output-path',
+      help='Path to output .apk, minus extension.')
+  parser.add_option('--key-path', help='Path to keystore for signing.')
+  parser.add_option('--key-passwd', help='Keystore password')
+  parser.add_option('--key-name', help='Keystore name')
+  parser.add_option('--densities',
+      help='Comma-separated list of densities to finalize.')
+  parser.add_option('--languages',
+      help='GYP list of language splits to finalize.')
+
+  options, _ = parser.parse_args()
+  options.load_library_from_zip = 0
+
+  if options.densities:
+    for density in options.densities.split(','):
+      options.unsigned_apk_path = ("%s_%s" %
+          (options.resource_packaged_apk_path, density))
+      options.final_apk_path = ("%s-density-%s.apk" %
+          (options.base_output_path, density))
+      finalize_apk.FinalizeApk(options)
+
+  if options.languages:
+    for lang in build_utils.ParseGypList(options.languages):
+      options.unsigned_apk_path = ("%s_%s" %
+          (options.resource_packaged_apk_path, lang))
+      options.final_apk_path = ("%s-lang-%s.apk" %
+          (options.base_output_path, lang))
+      finalize_apk.FinalizeApk(options)
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/gyp/find.py b/build/android/gyp/find.py
new file mode 100755
index 0000000..a9f1d49
--- /dev/null
+++ b/build/android/gyp/find.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Finds files in directories.
+"""
+
+import fnmatch
+import optparse
+import os
+import sys
+
+
+def main(argv):
+  parser = optparse.OptionParser()
+  parser.add_option('--pattern', default='*', help='File pattern to match.')
+  options, directories = parser.parse_args(argv)
+
+  for d in directories:
+    if not os.path.exists(d):
+      print >> sys.stderr, '%s does not exist' % d
+      return 1
+    for root, _, filenames in os.walk(d):
+      for f in fnmatch.filter(filenames, options.pattern):
+        print os.path.join(root, f)
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
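
The script is a thin wrapper around os.walk plus fnmatch; the matching itself
behaves like this (file names hypothetical):

import fnmatch

filenames = ['Foo.java', 'Foo.class', 'README']  # hypothetical dir listing
print fnmatch.filter(filenames, '*.java')        # ['Foo.java']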
diff --git a/build/android/gyp/find_sun_tools_jar.py b/build/android/gyp/find_sun_tools_jar.py
new file mode 100755
index 0000000..2f15a15
--- /dev/null
+++ b/build/android/gyp/find_sun_tools_jar.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This finds the java distribution's tools.jar and copies it somewhere.
+"""
+
+import argparse
+import os
+import re
+import shutil
+import sys
+
+from util import build_utils
+
+RT_JAR_FINDER = re.compile(r'\[Opened (.*)/jre/lib/rt.jar\]')
+
+def main():
+  parser = argparse.ArgumentParser(description='Find Sun Tools Jar')
+  parser.add_argument('--depfile',
+                      help='Path to depfile. This must be specified as the '
+                           'action\'s first output.')
+  parser.add_argument('--output', required=True)
+  args = parser.parse_args()
+
+  sun_tools_jar_path = FindSunToolsJarPath()
+
+  if sun_tools_jar_path is None:
+    raise Exception("Couldn\'t find tools.jar")
+
+  # Use copyfile() instead of copy() because copy() also calls copymode(),
+  # and we don't want to inherit a read-only mode since we may copy over
+  # this file again.
+  shutil.copyfile(sun_tools_jar_path, args.output)
+
+  if args.depfile:
+    build_utils.WriteDepfile(
+        args.depfile,
+        [sun_tools_jar_path] + build_utils.GetPythonDependencies())
+
+
+def FindSunToolsJarPath():
+  # This works with at least openjdk 1.6, 1.7 and sun java 1.6, 1.7
+  stdout = build_utils.CheckOutput(
+      ["java", "-verbose", "-version"], print_stderr=False)
+  for ln in stdout.splitlines():
+    match = RT_JAR_FINDER.match(ln)
+    if match:
+      return os.path.join(match.group(1), 'lib', 'tools.jar')
+
+  return None
+
+
+if __name__ == '__main__':
+  sys.exit(main())
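
FindSunToolsJarPath() relies on the JVM logging every jar it opens when run
with -verbose; a sketch of the regex against one such line (the JDK install
path is hypothetical):

import os
import re

RT_JAR_FINDER = re.compile(r'\[Opened (.*)/jre/lib/rt.jar\]')
line = '[Opened /usr/lib/jvm/java-7-openjdk-amd64/jre/lib/rt.jar]'
match = RT_JAR_FINDER.match(line)
if match:
  print os.path.join(match.group(1), 'lib', 'tools.jar')
  # /usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar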
diff --git a/build/android/gyp/gcc_preprocess.py b/build/android/gyp/gcc_preprocess.py
new file mode 100755
index 0000000..03becf9
--- /dev/null
+++ b/build/android/gyp/gcc_preprocess.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+import os
+import sys
+
+from util import build_utils
+
+def DoGcc(options):
+  build_utils.MakeDirectory(os.path.dirname(options.output))
+
+  gcc_cmd = [ 'gcc' ]  # invoke host gcc.
+  if options.defines:
+    gcc_cmd.extend(sum(map(lambda w: ['-D', w], options.defines), []))
+  gcc_cmd.extend([
+      '-E',                  # stop after preprocessing.
+      '-D', 'ANDROID',       # Specify ANDROID define for pre-processor.
+      '-x', 'c-header',      # treat sources as C header files
+      '-P',                  # disable line markers, i.e. '#line 309'
+      '-I', options.include_path,
+      '-o', options.output,
+      options.template
+      ])
+
+  build_utils.CheckOutput(gcc_cmd)
+
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_option('--include-path', help='Include path for gcc.')
+  parser.add_option('--template', help='Path to template.')
+  parser.add_option('--output', help='Path for generated file.')
+  parser.add_option('--stamp', help='Path to touch on success.')
+  parser.add_option('--defines', action='append',
+                    help='Preprocessor define (can be used multiple times).')
+
+  options, _ = parser.parse_args(args)
+
+  DoGcc(options)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        build_utils.GetPythonDependencies())
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
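
Each --defines value is expanded into its own -D flag; a sketch of the command
DoGcc() builds, with hypothetical option values:

defines = ['CHROMIUM_BUILD', 'ENABLE_FOO']  # hypothetical --defines values
gcc_cmd = ['gcc']
gcc_cmd.extend(sum(map(lambda w: ['-D', w], defines), []))
gcc_cmd.extend(['-E', '-D', 'ANDROID', '-x', 'c-header', '-P',
                '-I', 'include/dir', '-o', 'out/Foo.java', 'Foo.template'])
print gcc_cmd[:5]
# ['gcc', '-D', 'CHROMIUM_BUILD', '-D', 'ENABLE_FOO']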
diff --git a/build/android/gyp/generate_copy_ex_outputs.py b/build/android/gyp/generate_copy_ex_outputs.py
new file mode 100755
index 0000000..e425b4a
--- /dev/null
+++ b/build/android/gyp/generate_copy_ex_outputs.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Generates output paths for copy_ex.gypi from the given source files and
+# destination path.
+
+import argparse
+import os
+import sys
+
+def DoMain(argv):
+  parser = argparse.ArgumentParser(prog='generate_copy_ex_outputs')
+  parser.add_argument('--src-files',
+                      nargs='+',
+                      help='a list of files to copy')
+  parser.add_argument('--dest-path',
+                      required=True,
+                      help='the directory to copy files to')
+  options = parser.parse_args(argv)
+  # Quote each element so filename spaces don't mess up gyp's attempt to parse
+  # it into a list.
+  return ' '.join('"%s"' % os.path.join(options.dest_path,
+                                        os.path.basename(src))
+                  for src in options.src_files)
+
+if __name__ == '__main__':
+  results = DoMain(sys.argv[1:])
+  if results:
+    print results
+
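DoMain() is pure string manipulation, so its output is easy to pin down; a
sketch with hypothetical arguments:

import os

src_files = ['a/b/one.txt', 'c/two two.txt']  # hypothetical inputs
dest_path = 'out/Release/copied'
print ' '.join('"%s"' % os.path.join(dest_path, os.path.basename(s))
               for s in src_files)
# "out/Release/copied/one.txt" "out/Release/copied/two two.txt"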
diff --git a/build/android/gyp/generate_resource_rewriter.py b/build/android/gyp/generate_resource_rewriter.py
new file mode 100755
index 0000000..b6202ed
--- /dev/null
+++ b/build/android/gyp/generate_resource_rewriter.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Generate ResourceRewriter.java which overwrites the given package's
+   resource id.
+"""
+
+import argparse
+import os
+import sys
+import zipfile
+
+from util import build_utils
+
+# Import jinja2 from third_party/jinja2
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                             '..',
+                                             '..',
+                                             '..',
+                                             'third_party')))
+import jinja2
+
+
+RESOURCE_REWRITER_JAVA = "ResourceRewriter.java"
+
+RESOURCE_REWRITER="""/* AUTO-GENERATED FILE.  DO NOT MODIFY. */
+
+package {{ package }};
+/**
+ * Helper class used to fix up resource ids.
+ */
+class ResourceRewriter {
+    /**
+     * Rewrite the R 'constants' for the WebView.
+     */
+    public static void rewriteRValues(final int packageId) {
+        {% for res_package in res_packages %}
+        {{ res_package }}.R.onResourcesLoaded(packageId);
+        {% endfor %}
+    }
+}
+"""
+
+def ParseArgs(args):
+  """Parses command line options.
+
+  Returns:
+    An argparse.Namespace from ArgumentParser.parse_args().
+  """
+  parser = argparse.ArgumentParser(prog='generate_resource_rewriter')
+
+  parser.add_argument('--package-name',
+                      required=True,
+                      help='The package name of ResourceRewriter.')
+  parser.add_argument('--dep-packages',
+                      required=True,
+                      help='A list of packages whose resource ids will be '
+                           'rewritten by ResourceRewriter.')
+  parser.add_argument('--output-dir',
+                      help='The output directory for the generated'
+                           ' ResourceRewriter.java.')
+  parser.add_argument('--srcjar',
+                      help='The path of the generated srcjar containing'
+                           ' ResourceRewriter.java.')
+
+  return parser.parse_args(args)
+
+
+def CreateResourceRewriter(package, res_packages, output_dir):
+  build_utils.MakeDirectory(output_dir)
+  java_path = os.path.join(output_dir, RESOURCE_REWRITER_JAVA)
+  template = jinja2.Template(RESOURCE_REWRITER,
+                             trim_blocks=True,
+                             lstrip_blocks=True)
+  output = template.render(package=package, res_packages=res_packages)
+  with open(java_path, 'w') as f:
+    f.write(output)
+
+def CreateResourceRewriterSrcjar(package, res_packages, srcjar_path):
+  with build_utils.TempDir() as temp_dir:
+    output_dir = os.path.join(temp_dir, *package.split('.'))
+    CreateResourceRewriter(package, res_packages, output_dir)
+    build_utils.DoZip([os.path.join(output_dir, RESOURCE_REWRITER_JAVA)],
+                      srcjar_path,
+                      temp_dir)
+
+
+def main():
+  options = ParseArgs(build_utils.ExpandFileArgs(sys.argv[1:]))
+  package = options.package_name
+  if options.output_dir:
+    output_dir = os.path.join(options.output_dir, *package.split('.'))
+    CreateResourceRewriter(
+        package,
+        build_utils.ParseGypList(options.dep_packages),
+        output_dir)
+  else:
+    CreateResourceRewriterSrcjar(
+        package,
+        build_utils.ParseGypList(options.dep_packages),
+        options.srcjar)
+
+  return 0
+
+if __name__ == '__main__':
+  sys.exit(main())
+
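The template renders one onResourcesLoaded() call per dependent package; a
minimal standalone rendering with hypothetical package names:

import jinja2

template = jinja2.Template(
    '{% for p in res_packages %}{{ p }}.R.onResourcesLoaded(packageId);\n'
    '{% endfor %}')
print template.render(res_packages=['org.chromium.ui', 'org.chromium.content'])
# org.chromium.ui.R.onResourcesLoaded(packageId);
# org.chromium.content.R.onResourcesLoaded(packageId);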
diff --git a/build/android/gyp/generate_split_manifest.py b/build/android/gyp/generate_split_manifest.py
new file mode 100755
index 0000000..9cb3bca
--- /dev/null
+++ b/build/android/gyp/generate_split_manifest.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Creates an AndroidManifest.xml for an APK split.
+
+Given the manifest file for the main APK, generates an AndroidManifest.xml with
+the values required for a split APK (package, versionCode, etc.).
+"""
+
+import optparse
+import xml.etree.ElementTree
+
+from util import build_utils
+
+MANIFEST_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
+<manifest
+    xmlns:android="http://schemas.android.com/apk/res/android"
+    package="%(package)s"
+    split="%(split)s">
+  <uses-sdk android:minSdkVersion="21" />
+  <application android:hasCode="%(has_code)s">
+  </application>
+</manifest>
+"""
+
+def ParseArgs():
+  """Parses command line options.
+
+  Returns:
+    An options object, as from optparse.OptionParser.parse_args().
+  """
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_option('--main-manifest', help='The main manifest of the app')
+  parser.add_option('--out-manifest', help='The output manifest')
+  parser.add_option('--split', help='The name of the split')
+  parser.add_option(
+      '--has-code',
+      action='store_true',
+      default=False,
+      help='Whether the split will contain a .dex file')
+
+  (options, args) = parser.parse_args()
+
+  if args:
+    parser.error('No positional arguments should be given.')
+
+  # Check that required options have been provided.
+  required_options = ('main_manifest', 'out_manifest', 'split')
+  build_utils.CheckOptions(options, parser, required=required_options)
+
+  return options
+
+
+def Build(main_manifest, split, has_code):
+  """Builds a split manifest based on the manifest of the main APK.
+
+  Args:
+    main_manifest: the XML manifest of the main APK as a string
+    split: the name of the split as a string
+    has_code: whether this split APK will contain .dex files
+
+  Returns:
+    The XML split manifest as a string
+  """
+
+  doc = xml.etree.ElementTree.fromstring(main_manifest)
+  package = doc.get('package')
+
+  return MANIFEST_TEMPLATE % {
+      'package': package,
+      'split': split.replace('-', '_'),
+      'has_code': str(has_code).lower()
+  }
+
+
+def main():
+  options = ParseArgs()
+  main_manifest = open(options.main_manifest).read()
+  split_manifest = Build(
+      main_manifest,
+      options.split,
+      options.has_code)
+
+  with open(options.out_manifest, 'w') as f:
+    f.write(split_manifest)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        [options.main_manifest] + build_utils.GetPythonDependencies())
+
+
+if __name__ == '__main__':
+  main()
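
Build() only substitutes three values into MANIFEST_TEMPLATE; a sketch of the
output for a hypothetical density split of a hypothetical package:

MANIFEST_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<manifest
    xmlns:android="http://schemas.android.com/apk/res/android"
    package="%(package)s"
    split="%(split)s">
  <uses-sdk android:minSdkVersion="21" />
  <application android:hasCode="%(has_code)s">
  </application>
</manifest>
"""
print MANIFEST_TEMPLATE % {'package': 'org.chromium.chrome',  # hypothetical
                           'split': 'density_hdpi',  # '-' already mapped to '_'
                           'has_code': str(False).lower()}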
diff --git a/build/android/gyp/generate_v14_compatible_resources.py b/build/android/gyp/generate_v14_compatible_resources.py
new file mode 100755
index 0000000..fc7abba
--- /dev/null
+++ b/build/android/gyp/generate_v14_compatible_resources.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Convert Android xml resources to API 14 compatible.
+
+There are two reasons we cannot simply use API 17 attributes,
+so this script generates another set of resources:
+
+1. The paddingStart attribute can cause a crash on the Galaxy Tab 2.
+2. There is a bug on JB-MR1 where paddingStart does not override
+   paddingLeft; this is fixed in JB-MR2. b/8654490
+
+Therefore, this resource generation script can be removed once
+we drop support for JB-MR1.
+
+Please refer to http://crbug.com/235118 for the details.
+"""
+
+import codecs
+import optparse
+import os
+import re
+import shutil
+import sys
+import xml.dom.minidom as minidom
+
+from util import build_utils
+
+# Note that we are assuming 'android:' is an alias of
+# the namespace 'http://schemas.android.com/apk/res/android'.
+
+GRAVITY_ATTRIBUTES = ('android:gravity', 'android:layout_gravity')
+
+# Almost all attributes that have "Start" or "End" in
+# their names should be mapped.
+ATTRIBUTES_TO_MAP = {'paddingStart' : 'paddingLeft',
+                     'drawableStart' : 'drawableLeft',
+                     'layout_alignStart' : 'layout_alignLeft',
+                     'layout_marginStart' : 'layout_marginLeft',
+                     'layout_alignParentStart' : 'layout_alignParentLeft',
+                     'layout_toStartOf' : 'layout_toLeftOf',
+                     'paddingEnd' : 'paddingRight',
+                     'drawableEnd' : 'drawableRight',
+                     'layout_alignEnd' : 'layout_alignRight',
+                     'layout_marginEnd' : 'layout_marginRight',
+                     'layout_alignParentEnd' : 'layout_alignParentRight',
+                     'layout_toEndOf' : 'layout_toRightOf'}
+
+ATTRIBUTES_TO_MAP = dict(['android:' + k, 'android:' + v] for k, v
+                         in ATTRIBUTES_TO_MAP.iteritems())
+
+ATTRIBUTES_TO_MAP_REVERSED = dict([v, k] for k, v
+                                  in ATTRIBUTES_TO_MAP.iteritems())
+
+
+def IterateXmlElements(node):
+  """minidom helper function that iterates all the element nodes.
+  Iteration order is pre-order depth-first."""
+  if node.nodeType == node.ELEMENT_NODE:
+    yield node
+  for child_node in node.childNodes:
+    for child_node_element in IterateXmlElements(child_node):
+      yield child_node_element
+
+
+def ParseAndReportErrors(filename):
+  try:
+    return minidom.parse(filename)
+  except Exception: # pylint: disable=broad-except
+    import traceback
+    traceback.print_exc()
+    sys.stderr.write('Failed to parse XML file: %s\n' % filename)
+    sys.exit(1)
+
+
+def AssertNotDeprecatedAttribute(name, value, filename):
+  """Raises an exception if the given attribute is deprecated."""
+  msg = None
+  if name in ATTRIBUTES_TO_MAP_REVERSED:
+    msg = '{0} should use {1} instead of {2}'.format(filename,
+        ATTRIBUTES_TO_MAP_REVERSED[name], name)
+  elif name in GRAVITY_ATTRIBUTES and ('left' in value or 'right' in value):
+    msg = '{0} should use start/end instead of left/right for {1}'.format(
+        filename, name)
+
+  if msg:
+    msg += ('\nFor background, see: http://android-developers.blogspot.com/'
+            '2013/03/native-rtl-support-in-android-42.html\n'
+            'If you have a legitimate need for this attribute, discuss with '
+            'kkimlabs@chromium.org or newt@chromium.org')
+    raise Exception(msg)
+
+
+def WriteDomToFile(dom, filename):
+  """Write the given dom to filename."""
+  build_utils.MakeDirectory(os.path.dirname(filename))
+  with codecs.open(filename, 'w', 'utf-8') as f:
+    dom.writexml(f, '', '  ', '\n', encoding='utf-8')
+
+
+def HasStyleResource(dom):
+  """Return True if the dom is a style resource, False otherwise."""
+  root_node = IterateXmlElements(dom).next()
+  return bool(root_node.nodeName == 'resources' and
+              list(root_node.getElementsByTagName('style')))
+
+
+def ErrorIfStyleResourceExistsInDir(input_dir):
+  """If a style resource is in input_dir, raises an exception."""
+  for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
+    dom = ParseAndReportErrors(input_filename)
+    if HasStyleResource(dom):
+      # Allow style files in third_party to exist in non-v17 directories so
+      # long as they do not contain deprecated attributes.
+      if 'third_party' not in input_dir or (
+          GenerateV14StyleResourceDom(dom, input_filename)):
+        raise Exception('error: style file ' + input_filename +
+                        ' should be under ' + input_dir +
+                        '-v17 directory. Please refer to '
+                        'http://crbug.com/243952 for the details.')
+
+
+def GenerateV14LayoutResourceDom(dom, filename, assert_not_deprecated=True):
+  """Convert layout resource to API 14 compatible layout resource.
+
+  Args:
+    dom: Parsed minidom object to be modified.
+    filename: Filename that the DOM was parsed from.
+    assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft) will
+                           cause an exception to be thrown.
+
+  Returns:
+    True if dom is modified, False otherwise.
+  """
+  is_modified = False
+
+  # Iterate all the elements' attributes to find attributes to convert.
+  for element in IterateXmlElements(dom):
+    for name, value in list(element.attributes.items()):
+      # Convert any API 17 Start/End attributes to Left/Right attributes.
+      # For example, from paddingStart="10dp" to paddingLeft="10dp"
+      # Note: gravity attributes are not necessary to convert because
+      # start/end values are backward-compatible. Explained at
+      # https://plus.sandbox.google.com/+RomanNurik/posts/huuJd8iVVXY?e=Showroom
+      if name in ATTRIBUTES_TO_MAP:
+        element.setAttribute(ATTRIBUTES_TO_MAP[name], value)
+        del element.attributes[name]
+        is_modified = True
+      elif assert_not_deprecated:
+        AssertNotDeprecatedAttribute(name, value, filename)
+
+  return is_modified
+
+
+def GenerateV14StyleResourceDom(dom, filename, assert_not_deprecated=True):
+  """Convert style resource to API 14 compatible style resource.
+
+  Args:
+    dom: Parsed minidom object to be modified.
+    filename: Filename that the DOM was parsed from.
+    assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft) will
+                           cause an exception to be thrown.
+
+  Returns:
+    True if dom is modified, False otherwise.
+  """
+  is_modified = False
+
+  for style_element in dom.getElementsByTagName('style'):
+    for item_element in style_element.getElementsByTagName('item'):
+      name = item_element.attributes['name'].value
+      value = item_element.childNodes[0].nodeValue
+      if name in ATTRIBUTES_TO_MAP:
+        item_element.attributes['name'].value = ATTRIBUTES_TO_MAP[name]
+        is_modified = True
+      elif assert_not_deprecated:
+        AssertNotDeprecatedAttribute(name, value, filename)
+
+  return is_modified
+
+
+def GenerateV14LayoutResource(input_filename, output_v14_filename,
+                              output_v17_filename):
+  """Convert API 17 layout resource to API 14 compatible layout resource.
+
+  It's mostly a simple replacement, s/Start/Left s/End/Right,
+  on the attribute names.
+  If the generated resource is identical to the original resource,
+  don't do anything. If not, write the generated resource to
+  output_v14_filename, and copy the original resource to output_v17_filename.
+  """
+  dom = ParseAndReportErrors(input_filename)
+  is_modified = GenerateV14LayoutResourceDom(dom, input_filename)
+
+  if is_modified:
+    # Write the generated resource.
+    WriteDomToFile(dom, output_v14_filename)
+
+    # Copy the original resource.
+    build_utils.MakeDirectory(os.path.dirname(output_v17_filename))
+    shutil.copy2(input_filename, output_v17_filename)
+
+
+def GenerateV14StyleResource(input_filename, output_v14_filename):
+  """Convert API 17 style resources to API 14 compatible style resource.
+
+  Write the generated style resource to output_v14_filename.
+  It's mostly a simple replacement, s/Start/Left s/End/Right,
+  on the attribute names.
+  """
+  dom = ParseAndReportErrors(input_filename)
+  GenerateV14StyleResourceDom(dom, input_filename)
+
+  # Write the generated resource.
+  WriteDomToFile(dom, output_v14_filename)
+
+
+def GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir, output_v17_dir):
+  """Convert layout resources to API 14 compatible resources in input_dir."""
+  for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
+    rel_filename = os.path.relpath(input_filename, input_dir)
+    output_v14_filename = os.path.join(output_v14_dir, rel_filename)
+    output_v17_filename = os.path.join(output_v17_dir, rel_filename)
+    GenerateV14LayoutResource(input_filename, output_v14_filename,
+                              output_v17_filename)
+
+
+def GenerateV14StyleResourcesInDir(input_dir, output_v14_dir):
+  """Convert style resources to API 14 compatible resources in input_dir."""
+  for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
+    rel_filename = os.path.relpath(input_filename, input_dir)
+    output_v14_filename = os.path.join(output_v14_dir, rel_filename)
+    GenerateV14StyleResource(input_filename, output_v14_filename)
+
+
+def ParseArgs():
+  """Parses command line options.
+
+  Returns:
+    An options object, as from optparse.OptionParser.parse_args().
+  """
+  parser = optparse.OptionParser()
+  parser.add_option('--res-dir',
+                    help='directory containing resources '
+                         'used to generate v14 compatible resources')
+  parser.add_option('--res-v14-compatibility-dir',
+                    help='output directory into which '
+                         'v14 compatible resources will be generated')
+  parser.add_option('--stamp', help='File to touch on success')
+
+  options, args = parser.parse_args()
+
+  if args:
+    parser.error('No positional arguments should be given.')
+
+  # Check that required options have been provided.
+  required_options = ('res_dir', 'res_v14_compatibility_dir')
+  build_utils.CheckOptions(options, parser, required=required_options)
+  return options
+
+def GenerateV14Resources(res_dir, res_v14_dir):
+  for name in os.listdir(res_dir):
+    if not os.path.isdir(os.path.join(res_dir, name)):
+      continue
+
+    dir_pieces = name.split('-')
+    resource_type = dir_pieces[0]
+    qualifiers = dir_pieces[1:]
+
+    api_level_qualifier_index = -1
+    api_level_qualifier = ''
+    for index, qualifier in enumerate(qualifiers):
+      if re.match('v[0-9]+$', qualifier):
+        api_level_qualifier_index = index
+        api_level_qualifier = qualifier
+        break
+
+    # Android pre-v17 API doesn't support RTL. Skip.
+    if 'ldrtl' in qualifiers:
+      continue
+
+    input_dir = os.path.abspath(os.path.join(res_dir, name))
+
+    # We also need to copy the original v17 resource to the *-v17 directory
+    # because the generated v14 resource will hide the original resource.
+    output_v14_dir = os.path.join(res_v14_dir, name)
+    output_v17_dir = os.path.join(res_v14_dir, name + '-v17')
+
+    # We only convert layout resources under layout*/, xml*/,
+    # and style resources under values*/.
+    if resource_type in ('layout', 'xml'):
+      if not api_level_qualifier:
+        GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir,
+                                        output_v17_dir)
+    elif resource_type == 'values':
+      if api_level_qualifier == 'v17':
+        output_qualifiers = qualifiers[:]
+        del output_qualifiers[api_level_qualifier_index]
+        output_v14_dir = os.path.join(res_v14_dir,
+                                      '-'.join([resource_type] +
+                                               output_qualifiers))
+        GenerateV14StyleResourcesInDir(input_dir, output_v14_dir)
+      elif not api_level_qualifier:
+        ErrorIfStyleResourceExistsInDir(input_dir)
+
+def main():
+  options = ParseArgs()
+
+  res_v14_dir = options.res_v14_compatibility_dir
+
+  build_utils.DeleteDirectory(res_v14_dir)
+  build_utils.MakeDirectory(res_v14_dir)
+
+  GenerateV14Resources(options.res_dir, res_v14_dir)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+if __name__ == '__main__':
+  sys.exit(main())
+
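The layout conversion is a mechanical attribute rename; a standalone sketch of
one ATTRIBUTES_TO_MAP entry applied with minidom (the layout snippet is
hypothetical):

import xml.dom.minidom as minidom

xml_in = ('<LinearLayout '
          'xmlns:android="http://schemas.android.com/apk/res/android" '
          'android:paddingStart="10dp"/>')
element = minidom.parseString(xml_in).documentElement
for name, value in list(element.attributes.items()):
  if name == 'android:paddingStart':  # one ATTRIBUTES_TO_MAP entry
    element.setAttribute('android:paddingLeft', value)
    del element.attributes[name]
print element.toxml()
# <LinearLayout xmlns:android="..." android:paddingLeft="10dp"/>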
diff --git a/build/android/gyp/get_device_configuration.py b/build/android/gyp/get_device_configuration.py
new file mode 100755
index 0000000..0ec08ef
--- /dev/null
+++ b/build/android/gyp/get_device_configuration.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Gets and writes the configurations of the attached devices.
+
+This configuration is used by later build steps to determine which devices to
+install to and what needs to be installed to those devices.
+"""
+
+import optparse
+import os
+import sys
+
+from util import build_device
+from util import build_utils
+
+BUILD_ANDROID_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..'))
+sys.path.append(BUILD_ANDROID_DIR)
+
+import devil_chromium
+
+
+def main(argv):
+  parser = optparse.OptionParser()
+  parser.add_option('--stamp', action='store')
+  parser.add_option('--output', action='store')
+  parser.add_option('--output-directory', action='store')
+  options, _ = parser.parse_args(argv)
+
+  devil_chromium.Initialize(
+      output_directory=os.path.abspath(options.output_directory))
+
+  devices = build_device.GetAttachedDevices()
+
+  device_configurations = []
+  for d in devices:
+    configuration, is_online, has_root = (
+        build_device.GetConfigurationForDevice(d))
+
+    if not is_online:
+      build_utils.PrintBigWarning(
+          '%s is not online. Skipping managed install for this device. '
+          'Try rebooting the device to fix this warning.' % d)
+      continue
+
+    if not has_root:
+      build_utils.PrintBigWarning(
+          '"adb root" failed on device: %s\n'
+          'Skipping managed install for this device.'
+          % configuration['description'])
+      continue
+
+    device_configurations.append(configuration)
+
+  if len(device_configurations) == 0:
+    build_utils.PrintBigWarning(
+        'No valid devices attached. Skipping managed install steps.')
+  elif len(devices) > 1:
+    # Note that this checks len(devices) and not len(device_configurations).
+    # This way, any time there are multiple devices attached it is
+    # explicitly stated which device we will install things to even if all but
+    # one device were rejected for other reasons (e.g. two devices attached,
+    # one w/o root).
+    build_utils.PrintBigWarning(
+        'Multiple devices attached. '
+        'Installing to the preferred device: '
+        '%(id)s (%(description)s)' % (device_configurations[0]))
+
+
+  build_device.WriteConfigurations(device_configurations, options.output)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/build/android/gyp/insert_chromium_version.py b/build/android/gyp/insert_chromium_version.py
new file mode 100755
index 0000000..171f9d4
--- /dev/null
+++ b/build/android/gyp/insert_chromium_version.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Insert a version string into a library as a section '.chromium.version'.
+"""
+
+import optparse
+import os
+import sys
+import tempfile
+
+from util import build_utils
+
+def InsertChromiumVersion(android_objcopy,
+                          library_path,
+                          version_string):
+  # Remove existing .chromium.version section from .so
+  objcopy_command = [android_objcopy,
+                     '--remove-section=.chromium.version',
+                     library_path]
+  build_utils.CheckOutput(objcopy_command)
+
+  # Add a .chromium.version section.
+  with tempfile.NamedTemporaryFile() as stream:
+    stream.write(version_string)
+    stream.flush()
+    objcopy_command = [android_objcopy,
+                       '--add-section', '.chromium.version=%s' % stream.name,
+                       library_path]
+    build_utils.CheckOutput(objcopy_command)
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+  parser = optparse.OptionParser()
+
+  parser.add_option('--android-objcopy',
+      help='Path to the toolchain\'s objcopy binary')
+  parser.add_option('--stripped-libraries-dir',
+      help='Directory of native libraries')
+  parser.add_option('--libraries',
+      help='List of libraries')
+  parser.add_option('--version-string',
+      help='Version string to be inserted')
+  parser.add_option('--stamp', help='Path to touch on success')
+
+  options, _ = parser.parse_args(args)
+  libraries = build_utils.ParseGypList(options.libraries)
+
+  for library in libraries:
+    library_path = os.path.join(options.stripped_libraries_dir, library)
+
+    InsertChromiumVersion(options.android_objcopy,
+                          library_path,
+                          options.version_string)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
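
One way to verify the result is to dump the section back out; a sketch using
readelf's string-dump mode (the library path is hypothetical):

import subprocess

# readelf -p prints the named section as strings.
print subprocess.check_output(
    ['readelf', '-p', '.chromium.version', 'libchrome.so'])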
diff --git a/build/android/gyp/jar.py b/build/android/gyp/jar.py
new file mode 100755
index 0000000..cfa5e50
--- /dev/null
+++ b/build/android/gyp/jar.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+import os
+import sys
+
+from util import build_utils
+
+
+_RESOURCE_CLASSES = [
+    "R.class",
+    "R##*.class",
+    "Manifest.class",
+    "Manifest##*.class",
+]
+
+
+def Jar(class_files, classes_dir, jar_path, manifest_file=None):
+  jar_path = os.path.abspath(jar_path)
+
+  # The paths of the files in the jar will be the same as they are passed in to
+  # the command. Because of this, the command should be run in
+  # options.classes_dir so the .class file paths in the jar are correct.
+  jar_cwd = classes_dir
+  class_files_rel = [os.path.relpath(f, jar_cwd) for f in class_files]
+  jar_cmd = ['jar', 'cf0', jar_path]
+  if manifest_file:
+    jar_cmd[1] += 'm'
+    jar_cmd.append(os.path.abspath(manifest_file))
+  jar_cmd.extend(class_files_rel)
+
+  if not class_files_rel:
+    empty_file = os.path.join(classes_dir, '.empty')
+    build_utils.Touch(empty_file)
+    jar_cmd.append(os.path.relpath(empty_file, jar_cwd))
+  build_utils.CheckOutput(jar_cmd, cwd=jar_cwd)
+  build_utils.Touch(jar_path, fail_if_missing=True)
+
+
+def JarDirectory(classes_dir, jar_path, manifest_file=None, predicate=None):
+  class_files = build_utils.FindInDirectory(classes_dir, '*.class')
+  if predicate:
+    class_files = [f for f in class_files if predicate(f)]
+
+  Jar(class_files, classes_dir, jar_path, manifest_file=manifest_file)
+
+
+def main():
+  parser = optparse.OptionParser()
+  parser.add_option('--classes-dir', help='Directory containing .class files.')
+  parser.add_option('--input-jar', help='Jar to include .class files from')
+  parser.add_option('--jar-path', help='Jar output path.')
+  parser.add_option('--excluded-classes',
+      help='GYP list of .class file patterns to exclude from the jar.')
+  parser.add_option('--strip-resource-classes-for',
+      help='GYP list of java package names to exclude R.class files in.')
+  parser.add_option('--stamp', help='Path to touch on success.')
+
+  args = build_utils.ExpandFileArgs(sys.argv[1:])
+  options, _ = parser.parse_args(args)
+  # Current implementation supports just one or the other of these:
+  assert not options.classes_dir or not options.input_jar
+
+  excluded_classes = []
+  if options.excluded_classes:
+    excluded_classes = build_utils.ParseGypList(options.excluded_classes)
+
+  if options.strip_resource_classes_for:
+    packages = build_utils.ParseGypList(options.strip_resource_classes_for)
+    excluded_classes.extend(p.replace('.', '/') + '/' + f
+                            for p in packages for f in _RESOURCE_CLASSES)
+
+  predicate = None
+  if excluded_classes:
+    predicate = lambda f: not build_utils.MatchesGlob(f, excluded_classes)
+
+  with build_utils.TempDir() as temp_dir:
+    classes_dir = options.classes_dir
+    if options.input_jar:
+      build_utils.ExtractAll(options.input_jar, temp_dir)
+      classes_dir = temp_dir
+    JarDirectory(classes_dir, options.jar_path, predicate=predicate)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
+
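The exclusion patterns are plain globs matched against .class paths; assuming
build_utils.MatchesGlob behaves like fnmatch, the predicate amounts to the
following (patterns and paths hypothetical):

import fnmatch

excluded = ['org/chromium/foo/R.class', 'org/chromium/foo/Manifest.class']
class_files = ['org/chromium/foo/R.class', 'org/chromium/foo/Thing.class']
print [f for f in class_files
       if not any(fnmatch.fnmatch(f, p) for p in excluded)]
# ['org/chromium/foo/Thing.class']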
diff --git a/build/android/gyp/jar_toc.py b/build/android/gyp/jar_toc.py
new file mode 100755
index 0000000..b830956
--- /dev/null
+++ b/build/android/gyp/jar_toc.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Creates a TOC file from a Java jar.
+
+The TOC file contains the non-package API of the jar. This includes all
+public/protected/package classes/functions/members and the values of static
+final variables (members with package access are kept because in some cases we
+have multiple libraries with the same package, particularly test+non-test). Some
+other information (major/minor javac version) is also included.
+
+This TOC file then can be used to determine if a dependent library should be
+rebuilt when this jar changes. That is, any change to the jar that would
+require a rebuild will have a corresponding change in the TOC file.
+"""
+
+import optparse
+import os
+import re
+import sys
+import zipfile
+
+from util import build_utils
+from util import md5_check
+
+
+def GetClassesInZipFile(zip_file):
+  classes = []
+  files = zip_file.namelist()
+  for f in files:
+    if f.endswith('.class'):
+      # f is of the form org/chromium/base/Class$Inner.class
+      classes.append(f.replace('/', '.')[:-6])
+  return classes
+
+
+def CallJavap(classpath, classes):
+  javap_cmd = [
+      'javap',
+      '-package',  # Show public/protected/package.
+      # -verbose is required to get constant values (which can be inlined in
+      # dependents).
+      '-verbose',
+      '-J-XX:NewSize=4m',
+      '-classpath', classpath
+      ] + classes
+  return build_utils.CheckOutput(javap_cmd)
+
+
+def ExtractToc(disassembled_classes):
+  # javap output is structured by indent (2-space) levels.
+  good_patterns = [
+      '^[^ ]', # This includes all class signatures.
+      '^  SourceFile:',
+      '^  minor version:',
+      '^  major version:',
+      '^  Constant value:',
+      '^  public ',
+      '^  protected ',
+      ]
+  bad_patterns = [
+      '^const #', # Matches the constant pool (i.e. literals used in the class).
+    ]
+
+  def JavapFilter(line):
+    return (re.match('|'.join(good_patterns), line) and
+        not re.match('|'.join(bad_patterns), line))
+  toc = filter(JavapFilter, disassembled_classes.split('\n'))
+
+  return '\n'.join(toc)
+
+
+def UpdateToc(jar_path, toc_path):
+  classes = GetClassesInZipFile(zipfile.ZipFile(jar_path))
+  toc = ''
+  if len(classes) != 0:
+    javap_output = CallJavap(classpath=jar_path, classes=classes)
+    toc = ExtractToc(javap_output)
+
+  with open(toc_path, 'w') as tocfile:
+    tocfile.write(toc)
+
+
+def DoJarToc(options):
+  jar_path = options.jar_path
+  toc_path = options.toc_path
+  record_path = '%s.md5.stamp' % toc_path
+  md5_check.CallAndRecordIfStale(
+      lambda: UpdateToc(jar_path, toc_path),
+      record_path=record_path,
+      input_paths=[jar_path],
+      force=not os.path.exists(toc_path),
+      )
+  build_utils.Touch(toc_path, fail_if_missing=True)
+
+
+def main():
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_option('--jar-path', help='Input .jar path.')
+  parser.add_option('--toc-path', help='Output .jar.TOC path.')
+  parser.add_option('--stamp', help='Path to touch on success.')
+
+  options, _ = parser.parse_args()
+
+  DoJarToc(options)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        build_utils.GetPythonDependencies())
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
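
ExtractToc() keeps only the signature-relevant lines of javap output; a sketch
of the same filter over a few sample lines (the javap output shown is
hypothetical, but follows the 2-space indent structure described above):

import re

good_patterns = ['^[^ ]', '^  Constant value:', '^  public ', '^  protected ']
bad_patterns = ['^const #']
lines = ['public class org.chromium.base.Log {',
         '  public static void i(java.lang.String);',
         '    Code:',               # implementation detail: dropped
         'const #1 = String #2;']   # constant pool: dropped
print '\n'.join(l for l in lines
                if re.match('|'.join(good_patterns), l)
                and not re.match('|'.join(bad_patterns), l))
# public class org.chromium.base.Log {
#   public static void i(java.lang.String);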
diff --git a/build/android/gyp/java_cpp_enum.py b/build/android/gyp/java_cpp_enum.py
new file mode 100755
index 0000000..b304930
--- /dev/null
+++ b/build/android/gyp/java_cpp_enum.py
@@ -0,0 +1,369 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+from datetime import date
+import re
+import optparse
+import os
+from string import Template
+import sys
+import zipfile
+
+from util import build_utils
+
+# List of C++ types that are compatible with the Java code generated by this
+# script.
+#
+# This script can parse .idl files; however, at present it ignores special
+# rules such as [cpp_enum_prefix_override="ax_attr"].
+ENUM_FIXED_TYPE_WHITELIST = ['char', 'unsigned char',
+                             'short', 'unsigned short',
+                             'int', 'int8_t', 'int16_t', 'int32_t',
+                             'uint8_t', 'uint16_t']
+
+class EnumDefinition(object):
+  def __init__(self, original_enum_name=None, class_name_override=None,
+               enum_package=None, entries=None, fixed_type=None):
+    self.original_enum_name = original_enum_name
+    self.class_name_override = class_name_override
+    self.enum_package = enum_package
+    self.entries = collections.OrderedDict(entries or [])
+    self.prefix_to_strip = None
+    self.fixed_type = fixed_type
+
+  def AppendEntry(self, key, value):
+    if key in self.entries:
+      raise Exception('Multiple definitions of key %s found.' % key)
+    self.entries[key] = value
+
+  @property
+  def class_name(self):
+    return self.class_name_override or self.original_enum_name
+
+  def Finalize(self):
+    self._Validate()
+    self._AssignEntryIndices()
+    self._StripPrefix()
+
+  def _Validate(self):
+    assert self.class_name
+    assert self.enum_package
+    assert self.entries
+    if self.fixed_type and self.fixed_type not in ENUM_FIXED_TYPE_WHITELIST:
+      raise Exception('Fixed type %s for enum %s not whitelisted.' %
+          (self.fixed_type, self.class_name))
+
+  def _AssignEntryIndices(self):
+    # Enums, if given no value, are given the value of the previous enum + 1.
+    if not all(self.entries.values()):
+      prev_enum_value = -1
+      for key, value in self.entries.iteritems():
+        if not value:
+          self.entries[key] = prev_enum_value + 1
+        elif value in self.entries:
+          self.entries[key] = self.entries[value]
+        else:
+          try:
+            self.entries[key] = int(value)
+          except ValueError:
+            raise Exception('Could not interpret integer from enum value "%s" '
+                            'for key %s.' % (value, key))
+        prev_enum_value = self.entries[key]
+
+
+  def _StripPrefix(self):
+    prefix_to_strip = self.prefix_to_strip
+    if not prefix_to_strip:
+      prefix_to_strip = self.original_enum_name
+      prefix_to_strip = re.sub('(?!^)([A-Z]+)', r'_\1', prefix_to_strip).upper()
+      prefix_to_strip += '_'
+      if not all([w.startswith(prefix_to_strip) for w in self.entries.keys()]):
+        prefix_to_strip = ''
+
+    entries = collections.OrderedDict()
+    for (k, v) in self.entries.iteritems():
+      stripped_key = k.replace(prefix_to_strip, '', 1)
+      if isinstance(v, basestring):
+        stripped_value = v.replace(prefix_to_strip, '', 1)
+      else:
+        stripped_value = v
+      entries[stripped_key] = stripped_value
+
+    self.entries = entries
+
+class DirectiveSet(object):
+  class_name_override_key = 'CLASS_NAME_OVERRIDE'
+  enum_package_key = 'ENUM_PACKAGE'
+  prefix_to_strip_key = 'PREFIX_TO_STRIP'
+
+  known_keys = [class_name_override_key, enum_package_key, prefix_to_strip_key]
+
+  def __init__(self):
+    self._directives = {}
+
+  def Update(self, key, value):
+    if key not in DirectiveSet.known_keys:
+      raise Exception("Unknown directive: " + key)
+    self._directives[key] = value
+
+  @property
+  def empty(self):
+    return len(self._directives) == 0
+
+  def UpdateDefinition(self, definition):
+    definition.class_name_override = self._directives.get(
+        DirectiveSet.class_name_override_key, '')
+    definition.enum_package = self._directives.get(
+        DirectiveSet.enum_package_key)
+    definition.prefix_to_strip = self._directives.get(
+        DirectiveSet.prefix_to_strip_key)
+
+
+class HeaderParser(object):
+  single_line_comment_re = re.compile(r'\s*//')
+  multi_line_comment_start_re = re.compile(r'\s*/\*')
+  enum_line_re = re.compile(r'^\s*(\w+)(\s*\=\s*([^,\n]+))?,?')
+  enum_end_re = re.compile(r'^\s*}\s*;\.*$')
+  generator_directive_re = re.compile(
+      r'^\s*//\s+GENERATED_JAVA_(\w+)\s*:\s*([\.\w]+)$')
+  multi_line_generator_directive_start_re = re.compile(
+      r'^\s*//\s+GENERATED_JAVA_(\w+)\s*:\s*\(([\.\w]*)$')
+  multi_line_directive_continuation_re = re.compile(
+      r'^\s*//\s+([\.\w]+)$')
+  multi_line_directive_end_re = re.compile(
+      r'^\s*//\s+([\.\w]*)\)$')
+
+  optional_class_or_struct_re = r'(class|struct)?'
+  enum_name_re = r'(\w+)'
+  optional_fixed_type_re = r'(\:\s*(\w+\s*\w+?))?'
+  enum_start_re = re.compile(r'^\s*(?:\[cpp.*\])?\s*enum\s+' +
+      optional_class_or_struct_re + '\s*' + enum_name_re + '\s*' +
+      optional_fixed_type_re + '\s*{\s*$')
+
+  def __init__(self, lines, path=None):
+    self._lines = lines
+    self._path = path
+    self._enum_definitions = []
+    self._in_enum = False
+    self._current_definition = None
+    self._generator_directives = DirectiveSet()
+    self._multi_line_generator_directive = None
+
+  def _ApplyGeneratorDirectives(self):
+    self._generator_directives.UpdateDefinition(self._current_definition)
+    self._generator_directives = DirectiveSet()
+
+  def ParseDefinitions(self):
+    for line in self._lines:
+      self._ParseLine(line)
+    return self._enum_definitions
+
+  def _ParseLine(self, line):
+    if self._multi_line_generator_directive:
+      self._ParseMultiLineDirectiveLine(line)
+    elif not self._in_enum:
+      self._ParseRegularLine(line)
+    else:
+      self._ParseEnumLine(line)
+
+  def _ParseEnumLine(self, line):
+    if HeaderParser.single_line_comment_re.match(line):
+      return
+    if HeaderParser.multi_line_comment_start_re.match(line):
+      raise Exception('Multi-line comments in enums are not supported in ' +
+                      self._path)
+    enum_end = HeaderParser.enum_end_re.match(line)
+    enum_entry = HeaderParser.enum_line_re.match(line)
+    if enum_end:
+      self._ApplyGeneratorDirectives()
+      self._current_definition.Finalize()
+      self._enum_definitions.append(self._current_definition)
+      self._in_enum = False
+    elif enum_entry:
+      enum_key = enum_entry.groups()[0]
+      enum_value = enum_entry.groups()[2]
+      self._current_definition.AppendEntry(enum_key, enum_value)
+
+  def _ParseMultiLineDirectiveLine(self, line):
+    multi_line_directive_continuation = (
+        HeaderParser.multi_line_directive_continuation_re.match(line))
+    multi_line_directive_end = (
+        HeaderParser.multi_line_directive_end_re.match(line))
+
+    if multi_line_directive_continuation:
+      value_cont = multi_line_directive_continuation.groups()[0]
+      self._multi_line_generator_directive[1].append(value_cont)
+    elif multi_line_directive_end:
+      directive_name = self._multi_line_generator_directive[0]
+      directive_value = "".join(self._multi_line_generator_directive[1])
+      directive_value += multi_line_directive_end.groups()[0]
+      self._multi_line_generator_directive = None
+      self._generator_directives.Update(directive_name, directive_value)
+    else:
+      raise Exception('Malformed multi-line directive declaration in ' +
+                      self._path)
+
+  def _ParseRegularLine(self, line):
+    enum_start = HeaderParser.enum_start_re.match(line)
+    generator_directive = HeaderParser.generator_directive_re.match(line)
+    multi_line_generator_directive_start = (
+        HeaderParser.multi_line_generator_directive_start_re.match(line))
+
+    if generator_directive:
+      directive_name = generator_directive.groups()[0]
+      directive_value = generator_directive.groups()[1]
+      self._generator_directives.Update(directive_name, directive_value)
+    elif multi_line_generator_directive_start:
+      directive_name = multi_line_generator_directive_start.groups()[0]
+      directive_value = multi_line_generator_directive_start.groups()[1]
+      self._multi_line_generator_directive = (directive_name, [directive_value])
+    elif enum_start:
+      if self._generator_directives.empty:
+        return
+      self._current_definition = EnumDefinition(
+          original_enum_name=enum_start.groups()[1],
+          fixed_type=enum_start.groups()[3])
+      self._in_enum = True
+
+def GetScriptName():
+  return os.path.basename(os.path.abspath(sys.argv[0]))
+
+def DoGenerate(source_paths):
+  for source_path in source_paths:
+    enum_definitions = DoParseHeaderFile(source_path)
+    if not enum_definitions:
+      raise Exception('No enums found in %s\n'
+                      'Did you forget prefixing enums with '
+                      '"// GENERATED_JAVA_ENUM_PACKAGE: foo"?' %
+                      source_path)
+    for enum_definition in enum_definitions:
+      package_path = enum_definition.enum_package.replace('.', os.path.sep)
+      file_name = enum_definition.class_name + '.java'
+      output_path = os.path.join(package_path, file_name)
+      output = GenerateOutput(source_path, enum_definition)
+      yield output_path, output
+
+
+def DoParseHeaderFile(path):
+  with open(path) as f:
+    return HeaderParser(f.readlines(), path).ParseDefinitions()
+
+
+def GenerateOutput(source_path, enum_definition):
+  template = Template("""
+// Copyright ${YEAR} The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is autogenerated by
+//     ${SCRIPT_NAME}
+// From
+//     ${SOURCE_PATH}
+
+package ${PACKAGE};
+
+public class ${CLASS_NAME} {
+${ENUM_ENTRIES}
+}
+""")
+
+  enum_template = Template('  public static final int ${NAME} = ${VALUE};')
+  enum_entries_string = []
+  for enum_name, enum_value in enum_definition.entries.iteritems():
+    values = {
+        'NAME': enum_name,
+        'VALUE': enum_value,
+    }
+    enum_entries_string.append(enum_template.substitute(values))
+  enum_entries_string = '\n'.join(enum_entries_string)
+
+  values = {
+      'CLASS_NAME': enum_definition.class_name,
+      'ENUM_ENTRIES': enum_entries_string,
+      'PACKAGE': enum_definition.enum_package,
+      'SCRIPT_NAME': GetScriptName(),
+      'SOURCE_PATH': source_path,
+      'YEAR': str(date.today().year)
+  }
+  return template.substitute(values)
+
+
+def AssertFilesList(output_paths, assert_files_list):
+  actual = set(output_paths)
+  expected = set(assert_files_list)
+  if not actual == expected:
+    need_to_add = list(actual - expected)
+    need_to_remove = list(expected - actual)
+    raise Exception('Output files list does not match expectations. Please '
+                    'add %s and remove %s.' % (need_to_add, need_to_remove))
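+
+# For example, if generation produced 'a/B.java' but --assert_file listed only
+# 'a/C.java', the exception asks to add ['a/B.java'] to, and remove
+# ['a/C.java'] from, the expected list.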
+
+def DoMain(argv):
+  usage = 'usage: %prog [options] [output_dir] input_file(s)...'
+  parser = optparse.OptionParser(usage=usage)
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_option('--assert_file', action="append", default=[],
+                    dest="assert_files_list", help='Assert that the given '
+                    'file is an output. There can be multiple occurrences of '
+                    'this flag.')
+  parser.add_option('--srcjar',
+                    help='When specified, a .srcjar at the given path is '
+                    'created instead of individual .java files.')
+  parser.add_option('--print_output_only', help='Only print output paths.',
+                    action='store_true')
+  parser.add_option('--verbose', help='Print more information.',
+                    action='store_true')
+
+  options, args = parser.parse_args(argv)
+
+  if options.srcjar:
+    if not args:
+      parser.error('Need to specify at least one input file')
+    input_paths = args
+  else:
+    if len(args) < 2:
+      parser.error(
+          'Need to specify output directory and at least one input file')
+    output_dir = args[0]
+    input_paths = args[1:]
+
+  if options.depfile:
+    python_deps = build_utils.GetPythonDependencies()
+    build_utils.WriteDepfile(options.depfile, input_paths + python_deps)
+
+  if options.srcjar:
+    if options.print_output_only:
+      parser.error('--print_output_only does not work with --srcjar')
+    if options.assert_files_list:
+      parser.error('--assert_file does not work with --srcjar')
+
+    with zipfile.ZipFile(options.srcjar, 'w', zipfile.ZIP_STORED) as srcjar:
+      for output_path, data in DoGenerate(input_paths):
+        build_utils.AddToZipHermetic(srcjar, output_path, data=data)
+  else:
+    # TODO(agrieve): Delete this non-srcjar branch once GYP is gone.
+    output_paths = []
+    for output_path, data in DoGenerate(input_paths):
+      full_path = os.path.join(output_dir, output_path)
+      output_paths.append(full_path)
+      if not options.print_output_only:
+        build_utils.MakeDirectory(os.path.dirname(full_path))
+        with open(full_path, 'w') as out_file:
+          out_file.write(data)
+
+    if options.assert_files_list:
+      AssertFilesList(output_paths, options.assert_files_list)
+
+    if options.verbose:
+      print 'Output paths:'
+      print '\n'.join(output_paths)
+
+    # Used by GYP.
+    return ' '.join(output_paths)
+
+
+if __name__ == '__main__':
+  DoMain(sys.argv[1:])
diff --git a/build/android/gyp/java_cpp_enum_tests.py b/build/android/gyp/java_cpp_enum_tests.py
new file mode 100755
index 0000000..902bbfa
--- /dev/null
+++ b/build/android/gyp/java_cpp_enum_tests.py
@@ -0,0 +1,438 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for enum_preprocess.py.
+
+This test suite containss various tests for the C++ -> Java enum generator.
+"""
+
+import collections
+from datetime import date
+import optparse
+import os
+import sys
+import unittest
+
+import java_cpp_enum
+from java_cpp_enum import EnumDefinition, GenerateOutput, GetScriptName
+from java_cpp_enum import HeaderParser
+
+sys.path.append(os.path.join(os.path.dirname(__file__), "gyp"))
+from util import build_utils
+
+class TestPreprocess(unittest.TestCase):
+  def testOutput(self):
+    definition = EnumDefinition(original_enum_name='ClassName',
+                                enum_package='some.package',
+                                entries=[('E1', 1), ('E2', '2 << 2')])
+    output = GenerateOutput('path/to/file', definition)
+    expected = """
+// Copyright %d The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is autogenerated by
+//     %s
+// From
+//     path/to/file
+
+package some.package;
+
+public class ClassName {
+  public static final int E1 = 1;
+  public static final int E2 = 2 << 2;
+}
+"""
+    self.assertEqual(expected % (date.today().year, GetScriptName()), output)
+
+  def testParseSimpleEnum(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: test.namespace
+      enum EnumName {
+        VALUE_ZERO,
+        VALUE_ONE,
+      };
+    """.split('\n')
+    definitions = HeaderParser(test_data).ParseDefinitions()
+    self.assertEqual(1, len(definitions))
+    definition = definitions[0]
+    self.assertEqual('EnumName', definition.class_name)
+    self.assertEqual('test.namespace', definition.enum_package)
+    self.assertEqual(collections.OrderedDict([('VALUE_ZERO', 0),
+                                              ('VALUE_ONE', 1)]),
+                     definition.entries)
+
+  def testParseBitShifts(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: test.namespace
+      enum EnumName {
+        VALUE_ZERO = 1 << 0,
+        VALUE_ONE = 1 << 1,
+      };
+    """.split('\n')
+    definitions = HeaderParser(test_data).ParseDefinitions()
+    self.assertEqual(1, len(definitions))
+    definition = definitions[0]
+    self.assertEqual('EnumName', definition.class_name)
+    self.assertEqual('test.namespace', definition.enum_package)
+    self.assertEqual(collections.OrderedDict([('VALUE_ZERO', '1 << 0'),
+                                              ('VALUE_ONE', '1 << 1')]),
+                     definition.entries)
+
+  def testParseClassNameOverride(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: test.namespace
+      // GENERATED_JAVA_CLASS_NAME_OVERRIDE: OverrideName
+      enum EnumName {
+        FOO
+      };
+
+      // GENERATED_JAVA_ENUM_PACKAGE: test.namespace
+      // GENERATED_JAVA_CLASS_NAME_OVERRIDE: OtherOverride
+      enum PrefixTest {
+        PREFIX_TEST_A,
+        PREFIX_TEST_B,
+      };
+    """.split('\n')
+    definitions = HeaderParser(test_data).ParseDefinitions()
+    self.assertEqual(2, len(definitions))
+    definition = definitions[0]
+    self.assertEqual('OverrideName', definition.class_name)
+
+    definition = definitions[1]
+    self.assertEqual('OtherOverride', definition.class_name)
+    self.assertEqual(collections.OrderedDict([('A', 0),
+                                              ('B', 1)]),
+                     definition.entries)
+
+  def testParseTwoEnums(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: test.namespace
+      enum EnumOne {
+        ENUM_ONE_A = 1,
+        // Comment there
+        ENUM_ONE_B = A,
+      };
+
+      enum EnumIgnore {
+        C, D, E
+      };
+
+      // GENERATED_JAVA_ENUM_PACKAGE: other.package
+      // GENERATED_JAVA_PREFIX_TO_STRIP: P_
+      enum EnumTwo {
+        P_A,
+        P_B
+      };
+    """.split('\n')
+    definitions = HeaderParser(test_data).ParseDefinitions()
+    self.assertEqual(2, len(definitions))
+    definition = definitions[0]
+    self.assertEqual('EnumOne', definition.class_name)
+    self.assertEqual('test.namespace', definition.enum_package)
+    self.assertEqual(collections.OrderedDict([('A', '1'),
+                                              ('B', 'A')]),
+                     definition.entries)
+
+    definition = definitions[1]
+    self.assertEqual('EnumTwo', definition.class_name)
+    self.assertEqual('other.package', definition.enum_package)
+    self.assertEqual(collections.OrderedDict([('A', 0),
+                                              ('B', 1)]),
+                     definition.entries)
+
+  def testParseThrowsOnUnknownDirective(self):
+    test_data = """
+      // GENERATED_JAVA_UNKNOWN: Value
+      enum EnumName {
+        VALUE_ONE,
+      };
+    """.split('\n')
+    with self.assertRaises(Exception):
+      HeaderParser(test_data).ParseDefinitions()
+
+  def testParseReturnsEmptyListWithoutDirectives(self):
+    test_data = """
+      enum EnumName {
+        VALUE_ONE,
+      };
+    """.split('\n')
+    self.assertEqual([], HeaderParser(test_data).ParseDefinitions())
+
+  def testParseEnumClass(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: test.namespace
+      enum class Foo {
+        FOO_A,
+      };
+    """.split('\n')
+    definitions = HeaderParser(test_data).ParseDefinitions()
+    self.assertEqual(1, len(definitions))
+    definition = definitions[0]
+    self.assertEqual('Foo', definition.class_name)
+    self.assertEqual('test.namespace', definition.enum_package)
+    self.assertEqual(collections.OrderedDict([('A', 0)]),
+                     definition.entries)
+
+  def testParseEnumStruct(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: test.namespace
+      enum struct Foo {
+        FOO_A,
+      };
+    """.split('\n')
+    definitions = HeaderParser(test_data).ParseDefinitions()
+    self.assertEqual(1, len(definitions))
+    definition = definitions[0]
+    self.assertEqual('Foo', definition.class_name)
+    self.assertEqual('test.namespace', definition.enum_package)
+    self.assertEqual(collections.OrderedDict([('A', 0)]),
+                     definition.entries)
+
+  def testParseFixedTypeEnum(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: test.namespace
+      enum Foo : int {
+        FOO_A,
+      };
+    """.split('\n')
+    definitions = HeaderParser(test_data).ParseDefinitions()
+    self.assertEqual(1, len(definitions))
+    definition = definitions[0]
+    self.assertEqual('Foo', definition.class_name)
+    self.assertEqual('test.namespace', definition.enum_package)
+    self.assertEqual('int', definition.fixed_type)
+    self.assertEqual(collections.OrderedDict([('A', 0)]),
+                     definition.entries)
+
+  def testParseFixedTypeEnumClass(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: test.namespace
+      enum class Foo: unsigned short {
+        FOO_A,
+      };
+    """.split('\n')
+    definitions = HeaderParser(test_data).ParseDefinitions()
+    self.assertEqual(1, len(definitions))
+    definition = definitions[0]
+    self.assertEqual('Foo', definition.class_name)
+    self.assertEqual('test.namespace', definition.enum_package)
+    self.assertEqual('unsigned short', definition.fixed_type)
+    self.assertEqual(collections.OrderedDict([('A', 0)]),
+                     definition.entries)
+
+  def testParseUnknownFixedTypeRaises(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: test.namespace
+      enum class Foo: foo_type {
+        FOO_A,
+      };
+    """.split('\n')
+    with self.assertRaises(Exception):
+      HeaderParser(test_data).ParseDefinitions()
+
+  def testParseSimpleMultiLineDirective(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: (
+      //   test.namespace)
+      // GENERATED_JAVA_CLASS_NAME_OVERRIDE: Bar
+      enum Foo {
+        FOO_A,
+      };
+    """.split('\n')
+    definitions = HeaderParser(test_data).ParseDefinitions()
+    self.assertEqual('test.namespace', definitions[0].enum_package)
+    self.assertEqual('Bar', definitions[0].class_name)
+
+  def testParseMultiLineDirective(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: (te
+      //   st.name
+      //   space)
+      enum Foo {
+        FOO_A,
+      };
+    """.split('\n')
+    definitions = HeaderParser(test_data).ParseDefinitions()
+    self.assertEqual('test.namespace', definitions[0].enum_package)
+
+  def testParseMultiLineDirectiveWithOtherDirective(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: (
+      //   test.namespace)
+      // GENERATED_JAVA_CLASS_NAME_OVERRIDE: (
+      //   Ba
+      //   r
+      //   )
+      enum Foo {
+        FOO_A,
+      };
+    """.split('\n')
+    definitions = HeaderParser(test_data).ParseDefinitions()
+    self.assertEqual('test.namespace', definitions[0].enum_package)
+    self.assertEqual('Bar', definitions[0].class_name)
+
+  def testParseMalformedMultiLineDirectiveWithOtherDirective(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: (
+      //   test.name
+      //   space
+      // GENERATED_JAVA_CLASS_NAME_OVERRIDE: Bar
+      enum Foo {
+        FOO_A,
+      };
+    """.split('\n')
+    with self.assertRaises(Exception):
+      HeaderParser(test_data).ParseDefinitions()
+
+  def testParseMalformedMultiLineDirective(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: (
+      //   test.name
+      //   space
+      enum Foo {
+        FOO_A,
+      };
+    """.split('\n')
+    with self.assertRaises(Exception):
+      HeaderParser(test_data).ParseDefinitions()
+
+  def testParseMalformedMultiLineDirectiveShort(self):
+    test_data = """
+      // GENERATED_JAVA_ENUM_PACKAGE: (
+      enum Foo {
+        FOO_A,
+      };
+    """.split('\n')
+    with self.assertRaises(Exception):
+      HeaderParser(test_data).ParseDefinitions()
+
+  def testEnumValueAssignmentNoneDefined(self):
+    definition = EnumDefinition(original_enum_name='c', enum_package='p')
+    definition.AppendEntry('A', None)
+    definition.AppendEntry('B', None)
+    definition.AppendEntry('C', None)
+    definition.Finalize()
+    self.assertEqual(collections.OrderedDict([('A', 0),
+                                              ('B', 1),
+                                              ('C', 2)]),
+                     definition.entries)
+
+  def testEnumValueAssignmentAllDefined(self):
+    definition = EnumDefinition(original_enum_name='c', enum_package='p')
+    definition.AppendEntry('A', '1')
+    definition.AppendEntry('B', '2')
+    definition.AppendEntry('C', '3')
+    definition.Finalize()
+    self.assertEqual(collections.OrderedDict([('A', '1'),
+                                              ('B', '2'),
+                                              ('C', '3')]),
+                     definition.entries)
+
+  def testEnumValueAssignmentReferences(self):
+    definition = EnumDefinition(original_enum_name='c', enum_package='p')
+    definition.AppendEntry('A', None)
+    definition.AppendEntry('B', 'A')
+    definition.AppendEntry('C', None)
+    definition.AppendEntry('D', 'C')
+    definition.Finalize()
+    self.assertEqual(collections.OrderedDict([('A', 0),
+                                              ('B', 0),
+                                              ('C', 1),
+                                              ('D', 1)]),
+                     definition.entries)
+
+  def testEnumValueAssignmentSet(self):
+    definition = EnumDefinition(original_enum_name='c', enum_package='p')
+    definition.AppendEntry('A', None)
+    definition.AppendEntry('B', '2')
+    definition.AppendEntry('C', None)
+    definition.Finalize()
+    self.assertEqual(collections.OrderedDict([('A', 0),
+                                              ('B', 2),
+                                              ('C', 3)]),
+                     definition.entries)
+
+  def testEnumValueAssignmentSetReferences(self):
+    definition = EnumDefinition(original_enum_name='c', enum_package='p')
+    definition.AppendEntry('A', None)
+    definition.AppendEntry('B', 'A')
+    definition.AppendEntry('C', 'B')
+    definition.AppendEntry('D', None)
+    definition.Finalize()
+    self.assertEqual(collections.OrderedDict([('A', 0),
+                                              ('B', 0),
+                                              ('C', 0),
+                                              ('D', 1)]),
+                     definition.entries)
+
+  def testEnumValueAssignmentRaises(self):
+    definition = EnumDefinition(original_enum_name='c', enum_package='p')
+    definition.AppendEntry('A', None)
+    definition.AppendEntry('B', 'foo')
+    definition.AppendEntry('C', None)
+    with self.assertRaises(Exception):
+      definition.Finalize()
+
+  def testExplicitPrefixStripping(self):
+    definition = EnumDefinition(original_enum_name='c', enum_package='p')
+    definition.AppendEntry('P_A', None)
+    definition.AppendEntry('B', None)
+    definition.AppendEntry('P_C', None)
+    definition.AppendEntry('P_LAST', 'P_C')
+    definition.prefix_to_strip = 'P_'
+    definition.Finalize()
+    self.assertEqual(collections.OrderedDict([('A', 0),
+                                              ('B', 1),
+                                              ('C', 2),
+                                              ('LAST', 2)]),
+                     definition.entries)
+
+  def testImplicitPrefixStripping(self):
+    definition = EnumDefinition(original_enum_name='ClassName',
+                                enum_package='p')
+    definition.AppendEntry('CLASS_NAME_A', None)
+    definition.AppendEntry('CLASS_NAME_B', None)
+    definition.AppendEntry('CLASS_NAME_C', None)
+    definition.AppendEntry('CLASS_NAME_LAST', 'CLASS_NAME_C')
+    definition.Finalize()
+    self.assertEqual(collections.OrderedDict([('A', 0),
+                                              ('B', 1),
+                                              ('C', 2),
+                                              ('LAST', 2)]),
+                     definition.entries)
+
+  def testImplicitPrefixStrippingRequiresAllConstantsToBePrefixed(self):
+    definition = EnumDefinition(original_enum_name='Name',
+                                enum_package='p')
+    definition.AppendEntry('A', None)
+    definition.AppendEntry('B', None)
+    definition.AppendEntry('NAME_LAST', None)
+    definition.Finalize()
+    self.assertEqual(['A', 'B', 'NAME_LAST'], definition.entries.keys())
+
+  def testGenerateThrowsOnEmptyInput(self):
+    with self.assertRaises(Exception):
+      original_do_parse = java_cpp_enum.DoParseHeaderFile
+      try:
+        java_cpp_enum.DoParseHeaderFile = lambda _: []
+        for _ in java_cpp_enum.DoGenerate(['file']):
+          pass
+      finally:
+        java_cpp_enum.DoParseHeaderFile = original_do_parse
+
+def main(argv):
+  parser = optparse.OptionParser()
+  parser.add_option("--stamp", help="File to touch on success.")
+  options, _ = parser.parse_args(argv)
+
+  suite = unittest.TestLoader().loadTestsFromTestCase(TestPreprocess)
+  unittest.TextTestRunner(verbosity=0).run(suite)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+if __name__ == '__main__':
+  main(sys.argv[1:])
diff --git a/build/android/gyp/java_google_api_keys.py b/build/android/gyp/java_google_api_keys.py
new file mode 100755
index 0000000..95cb416
--- /dev/null
+++ b/build/android/gyp/java_google_api_keys.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Generates a Java file with API keys.
+
+import argparse
+import os
+import string
+import sys
+import zipfile
+
+from util import build_utils
+
+sys.path.append(
+    os.path.abspath(os.path.join(sys.path[0], '../../../google_apis')))
+import google_api_keys
+
+sys.path.append(os.path.abspath(os.path.join(
+    os.path.dirname(__file__), os.pardir)))
+from pylib.constants import host_paths
+
+
+PACKAGE = 'org.chromium.chrome'
+CLASSNAME = 'GoogleAPIKeys'
+
+
+def GetScriptName():
+  return os.path.relpath(__file__, host_paths.DIR_SOURCE_ROOT)
+
+
+def GenerateOutput(constant_definitions):
+  template = string.Template("""
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is autogenerated by
+//     ${SCRIPT_NAME}
+// From
+//     ${SOURCE_PATH}
+
+package ${PACKAGE};
+
+public class ${CLASS_NAME} {
+${CONSTANT_ENTRIES}
+}
+""")
+
+  constant_template = string.Template(
+      '  public static final String ${NAME} = "${VALUE}";')
+  constant_entries_list = []
+  for constant_name, constant_value in constant_definitions.iteritems():
+    values = {
+        'NAME': constant_name,
+        'VALUE': constant_value,
+    }
+    constant_entries_list.append(constant_template.substitute(values))
+  constant_entries_string = '\n'.join(constant_entries_list)
+
+  values = {
+      'CLASS_NAME': CLASSNAME,
+      'CONSTANT_ENTRIES': constant_entries_string,
+      'PACKAGE': PACKAGE,
+      'SCRIPT_NAME': GetScriptName(),
+      'SOURCE_PATH': 'google_api_keys/google_api_keys.h',
+  }
+  return template.substitute(values)
+
+
+def _DoWriteJavaOutput(output_path, constant_definition):
+  folder = os.path.dirname(output_path)
+  if folder and not os.path.exists(folder):
+    os.makedirs(folder)
+  with open(output_path, 'w') as out_file:
+    out_file.write(GenerateOutput(constant_definition))
+
+
+def _DoWriteJarOutput(output_path, constant_definition):
+  folder = os.path.dirname(output_path)
+  if folder and not os.path.exists(folder):
+    os.makedirs(folder)
+  with zipfile.ZipFile(output_path, 'w') as srcjar:
+    path = '%s/%s' % (PACKAGE.replace('.', '/'), CLASSNAME + '.java')
+    data = GenerateOutput(constant_definition)
+    build_utils.AddToZipHermetic(srcjar, path, data=data)
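+    # With the constants above, the srcjar entry lands at
+    # 'org/chromium/chrome/GoogleAPIKeys.java'.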
+
+
+def _DoMain(argv):
+  parser = argparse.ArgumentParser()
+  parser.add_argument("--out", help="Path for java output.")
+  parser.add_argument("--srcjar", help="Path for srcjar output.")
+  options = parser.parse_args(argv)
+  if not options.out and not options.srcjar:
+    parser.print_help()
+    sys.exit(-1)
+
+  values = {}
+  values['GOOGLE_API_KEY'] = google_api_keys.GetAPIKey()
+  values['GOOGLE_API_KEY_REMOTING'] = google_api_keys.GetAPIKeyRemoting()
+  values['GOOGLE_API_KEY_PHYSICAL_WEB_TEST'] = (google_api_keys.
+      GetAPIKeyPhysicalWebTest())
+  values['GOOGLE_CLIENT_ID_MAIN'] = google_api_keys.GetClientID('MAIN')
+  values['GOOGLE_CLIENT_SECRET_MAIN'] = google_api_keys.GetClientSecret('MAIN')
+  values['GOOGLE_CLIENT_ID_CLOUD_PRINT'] = google_api_keys.GetClientID(
+      'CLOUD_PRINT')
+  values['GOOGLE_CLIENT_SECRET_CLOUD_PRINT'] = google_api_keys.GetClientSecret(
+      'CLOUD_PRINT')
+  values['GOOGLE_CLIENT_ID_REMOTING'] = google_api_keys.GetClientID('REMOTING')
+  values['GOOGLE_CLIENT_SECRET_REMOTING'] = google_api_keys.GetClientSecret(
+      'REMOTING')
+  values['GOOGLE_CLIENT_ID_REMOTING_HOST'] = google_api_keys.GetClientID(
+      'REMOTING_HOST')
+  values['GOOGLE_CLIENT_SECRET_REMOTING_HOST'] = (google_api_keys.
+      GetClientSecret('REMOTING_HOST'))
+  values['GOOGLE_CLIENT_ID_REMOTING_IDENTITY_API'] = (google_api_keys.
+      GetClientID('REMOTING_IDENTITY_API'))
+
+  if options.out:
+    _DoWriteJavaOutput(options.out, values)
+  if options.srcjar:
+    _DoWriteJarOutput(options.srcjar, values)
+
+
+if __name__ == '__main__':
+  _DoMain(sys.argv[1:])
+
diff --git a/build/android/gyp/java_google_api_keys_tests.py b/build/android/gyp/java_google_api_keys_tests.py
new file mode 100755
index 0000000..eb24ea4
--- /dev/null
+++ b/build/android/gyp/java_google_api_keys_tests.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for java_google_api_keys.py.
+
+This test suite contains various tests for the C++ -> Java Google API Keys
+generator.
+"""
+
+import collections
+import argparse
+import os
+import sys
+import unittest
+
+import java_google_api_keys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), "gyp"))
+from util import build_utils
+
+
+class TestJavaGoogleAPIKeys(unittest.TestCase):
+  def testOutput(self):
+    definition = {'E1': 'abc', 'E2': 'defgh'}
+    output = java_google_api_keys.GenerateOutput(definition)
+    expected = """
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is autogenerated by
+//     %s
+// From
+//     google_api_keys/google_api_keys.h
+
+package org.chromium.chrome;
+
+public class GoogleAPIKeys {
+  public static final String E1 = "abc";
+  public static final String E2 = "defgh";
+}
+"""
+    self.assertEqual(expected % java_google_api_keys.GetScriptName(), output)
+
+
+def main(argv):
+  parser = argparse.ArgumentParser()
+  parser.add_argument("--stamp", help="File to touch on success.")
+  options = parser.parse_args(argv)
+
+  suite = unittest.TestLoader().loadTestsFromTestCase(TestJavaGoogleAPIKeys)
+  unittest.TextTestRunner(verbosity=0).run(suite)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+if __name__ == '__main__':
+  main(sys.argv[1:])
+
diff --git a/build/android/gyp/javac.py b/build/android/gyp/javac.py
new file mode 100755
index 0000000..5722fb1
--- /dev/null
+++ b/build/android/gyp/javac.py
@@ -0,0 +1,393 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+import os
+import shutil
+import re
+import sys
+import textwrap
+
+from util import build_utils
+from util import md5_check
+
+import jar
+
+sys.path.append(build_utils.COLORAMA_ROOT)
+import colorama
+
+
+def ColorJavacOutput(output):
+  fileline_prefix = r'(?P<fileline>(?P<file>[-.\w/\\]+.java):(?P<line>[0-9]+):)'
+  warning_re = re.compile(
+      fileline_prefix + r'(?P<full_message> warning: (?P<message>.*))$')
+  error_re = re.compile(
+      fileline_prefix + r'(?P<full_message> (?P<message>.*))$')
+  marker_re = re.compile(r'\s*(?P<marker>\^)\s*$')
+
+  warning_color = ['full_message', colorama.Fore.YELLOW + colorama.Style.DIM]
+  error_color = ['full_message', colorama.Fore.MAGENTA + colorama.Style.BRIGHT]
+  marker_color = ['marker',  colorama.Fore.BLUE + colorama.Style.BRIGHT]
+
+  def Colorize(line, regex, color):
+    match = regex.match(line)
+    start = match.start(color[0])
+    end = match.end(color[0])
+    return (line[:start]
+            + color[1] + line[start:end]
+            + colorama.Fore.RESET + colorama.Style.RESET_ALL
+            + line[end:])
+
+  def ApplyColor(line):
+    if warning_re.match(line):
+      line = Colorize(line, warning_re, warning_color)
+    elif error_re.match(line):
+      line = Colorize(line, error_re, error_color)
+    elif marker_re.match(line):
+      line = Colorize(line, marker_re, marker_color)
+    return line
+
+  return '\n'.join(map(ApplyColor, output.split('\n')))
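+
+# Illustrative example (the path and message are made up): a javac line such as
+#   path/to/Foo.java:42: warning: [deprecation] foo() in Foo has been deprecated
+# matches warning_re, so Colorize wraps only the matched 'full_message' span in
+# the dim yellow escape codes and then resets the style.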
+
+
+ERRORPRONE_OPTIONS = [
+  # These crash on lots of targets.
+  '-Xep:ParameterPackage:OFF',
+  '-Xep:OverridesGuiceInjectableMethod:OFF',
+  '-Xep:OverridesJavaxInjectableMethod:OFF',
+]
+
+
+def _FilterJavaFiles(paths, filters):
+  return [f for f in paths
+          if not filters or build_utils.MatchesGlob(f, filters)]
+
+
+_MAX_MANIFEST_LINE_LEN = 72
+
+
+def _ExtractClassFiles(jar_path, dest_dir, java_files):
+  """Extracts all .class files not corresponding to |java_files|."""
+  # Two challenges exist here:
+  # 1. |java_files| have prefixes that are not represented in the jar paths.
+  # 2. A single .java file results in multiple .class files when it contains
+  #    nested classes.
+  # Here's an example:
+  #   source path: ../../base/android/java/src/org/chromium/Foo.java
+  #   jar paths: org/chromium/Foo.class, org/chromium/Foo$Inner.class
+  # To extract only .class files not related to the given .java files, we strip
+  # off ".class" and "$*.class" and use a substring match against java_files.
+  def extract_predicate(path):
+    if not path.endswith('.class'):
+      return False
+    path_without_suffix = re.sub(r'(?:\$|\.)[^/]*class$', '', path)
+    partial_java_path = path_without_suffix + '.java'
+    return not any(p.endswith(partial_java_path) for p in java_files)
+
+  build_utils.ExtractAll(jar_path, path=dest_dir, predicate=extract_predicate)
+  for path in build_utils.FindInDirectory(dest_dir, '*.class'):
+    shutil.copystat(jar_path, path)
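+
+# Worked example of extract_predicate, continuing the comment above: with
+# java_files = ['../../base/android/java/src/org/chromium/Foo.java'],
+#   'org/chromium/Foo.class'       strips to 'org/chromium/Foo'; the source
+#                                  path ends with 'org/chromium/Foo.java', so
+#                                  the entry is skipped (returns False).
+#   'org/chromium/Foo$Inner.class' strips the same way and is also skipped.
+#   'org/chromium/Bar.class'       has no matching source and is extracted.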
+
+
+def _ConvertToJMakeArgs(javac_cmd, pdb_path):
+  new_args = ['bin/jmake', '-pdb', pdb_path]
+  if javac_cmd[0] != 'javac':
+    new_args.extend(('-jcexec', new_args[0]))
+  if md5_check.PRINT_EXPLANATIONS:
+    new_args.append('-Xtiming')
+
+  do_not_prefix = ('-classpath', '-bootclasspath')
+  skip_next = False
+  for arg in javac_cmd[1:]:
+    if not skip_next and arg not in do_not_prefix:
+      arg = '-C' + arg
+    new_args.append(arg)
+    skip_next = arg in do_not_prefix
+
+  return new_args
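+
+# Hedged usage sketch (the command below is illustrative, not from a build):
+#   _ConvertToJMakeArgs(['javac', '-g', '-classpath', 'a.jar:b.jar',
+#                        '-encoding', 'UTF-8'], 'out.pdb')
+# returns
+#   ['bin/jmake', '-pdb', 'out.pdb', '-C-g', '-classpath', 'a.jar:b.jar',
+#    '-C-encoding', '-CUTF-8']
+# i.e. ordinary compiler flags are prefixed with -C, while classpath flags and
+# their values pass through unchanged.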
+
+
+def _FixTempPathsInIncrementalMetadata(pdb_path, temp_dir):
+  # The .pdb records absolute paths. Fix up paths within /tmp (srcjars).
+  if os.path.exists(pdb_path):
+    # Although it's a binary file, search/replace still seems to work fine.
+    with open(pdb_path) as fileobj:
+      pdb_data = fileobj.read()
+    with open(pdb_path, 'w') as fileobj:
+      fileobj.write(re.sub(r'/tmp/[^/]*', temp_dir, pdb_data))
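+
+# For example, a recorded path such as '/tmp/tmpA1B2c3/java/Foo.java' (name
+# illustrative) is rewritten to '<temp_dir>/java/Foo.java', so jmake's change
+# detection stays valid across runs that extract srcjars into fresh temp dirs.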
+
+
+def _OnStaleMd5(changes, options, javac_cmd, java_files, classpath_inputs):
+  with build_utils.TempDir() as temp_dir:
+    srcjars = options.java_srcjars
+    # The .excluded.jar contains .class files excluded from the main jar.
+    # It is used for incremental compiles.
+    excluded_jar_path = options.jar_path.replace('.jar', '.excluded.jar')
+
+    classes_dir = os.path.join(temp_dir, 'classes')
+    os.makedirs(classes_dir)
+
+    changed_paths = None
+    # jmake can handle deleted files, but it's a rare case and it would
+    # complicate this script's logic.
+    if options.incremental and changes.AddedOrModifiedOnly():
+      changed_paths = set(changes.IterChangedPaths())
+      # Do a full compile if classpath has changed.
+      # jmake doesn't seem to do this on its own... Might be that ijars mess up
+      # its change-detection logic.
+      if any(p in changed_paths for p in classpath_inputs):
+        changed_paths = None
+
+    if options.incremental:
+      # jmake is a compiler wrapper that figures out the minimal set of .java
+      # files that need to be rebuilt given a set of .java files that have
+      # changed.
+      # jmake determines what files are stale based on timestamps between .java
+      # and .class files. Since we use .jars, .srcjars, and md5 checks,
+      # timestamp info isn't accurate for this purpose. Rather than use jmake's
+      # programmatic interface (like we eventually should), we ensure that all
+      # .class files are newer than their .java files, and convey to jmake which
+      # sources are stale by having their .class files be missing entirely
+      # (by not extracting them).
+      pdb_path = options.jar_path + '.pdb'
+      javac_cmd = _ConvertToJMakeArgs(javac_cmd, pdb_path)
+      if srcjars:
+        _FixTempPathsInIncrementalMetadata(pdb_path, temp_dir)
+
+    if srcjars:
+      java_dir = os.path.join(temp_dir, 'java')
+      os.makedirs(java_dir)
+      for srcjar in options.java_srcjars:
+        if changed_paths:
+          changed_paths.update(os.path.join(java_dir, f)
+                               for f in changes.IterChangedSubpaths(srcjar))
+        build_utils.ExtractAll(srcjar, path=java_dir, pattern='*.java')
+      jar_srcs = build_utils.FindInDirectory(java_dir, '*.java')
+      jar_srcs = _FilterJavaFiles(jar_srcs, options.javac_includes)
+      java_files.extend(jar_srcs)
+      if changed_paths:
+        # Set the mtime of all sources to 0 since we use the absence of .class
+        # files to tell jmake which files are stale.
+        for path in jar_srcs:
+          os.utime(path, (0, 0))
+
+    if java_files:
+      if changed_paths:
+        changed_java_files = [p for p in java_files if p in changed_paths]
+        if os.path.exists(options.jar_path):
+          _ExtractClassFiles(options.jar_path, classes_dir, changed_java_files)
+        if os.path.exists(excluded_jar_path):
+          _ExtractClassFiles(excluded_jar_path, classes_dir, changed_java_files)
+        # Add the extracted files to the classpath. This is required because
+        # when compiling only a subset of files, classes that haven't changed
+        # need to be findable.
+        classpath_idx = javac_cmd.index('-classpath')
+        javac_cmd[classpath_idx + 1] += ':' + classes_dir
+
+      # An empty .pdb can happen when a target goes from having no sources to
+      # having sources; it's created by the call to build_utils.Touch() below.
+      if options.incremental:
+        if os.path.exists(pdb_path) and not os.path.getsize(pdb_path):
+          os.unlink(pdb_path)
+
+      # Don't include the output directory in the initial set of args since it
+      # being in a temp dir makes it unstable (breaks md5 stamping).
+      cmd = javac_cmd + ['-d', classes_dir] + java_files
+
+      # JMake prints out some diagnostic logs that we want to ignore.
+      # This assumes that all compiler output goes through stderr.
+      stdout_filter = lambda s: ''
+      if md5_check.PRINT_EXPLANATIONS:
+        stdout_filter = None
+
+      attempt_build = lambda: build_utils.CheckOutput(
+          cmd,
+          print_stdout=options.chromium_code,
+          stdout_filter=stdout_filter,
+          stderr_filter=ColorJavacOutput)
+      try:
+        attempt_build()
+      except build_utils.CalledProcessError as e:
+        # Work-around for a bug in jmake (http://crbug.com/551449).
+        if 'project database corrupted' not in e.output:
+          raise
+        print ('Applying work-around for jmake project database corrupted '
+               '(http://crbug.com/551449).')
+        os.unlink(pdb_path)
+        attempt_build()
+    elif options.incremental:
+      # Make sure output exists.
+      build_utils.Touch(pdb_path)
+
+    glob = options.jar_excluded_classes
+    inclusion_predicate = lambda f: not build_utils.MatchesGlob(f, glob)
+    exclusion_predicate = lambda f: not inclusion_predicate(f)
+
+    jar.JarDirectory(classes_dir,
+                     options.jar_path,
+                     predicate=inclusion_predicate)
+    jar.JarDirectory(classes_dir,
+                     excluded_jar_path,
+                     predicate=exclusion_predicate)
+
+
+def _ParseOptions(argv):
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_option(
+      '--src-gendirs',
+      help='Directories containing generated java files.')
+  parser.add_option(
+      '--java-srcjars',
+      action='append',
+      default=[],
+      help='List of srcjars to include in compilation.')
+  parser.add_option(
+      '--bootclasspath',
+      action='append',
+      default=[],
+      help='Boot classpath for javac. If this is specified multiple times, '
+      'they will all be appended to construct the classpath.')
+  parser.add_option(
+      '--classpath',
+      action='append',
+      help='Classpath for javac. If this is specified multiple times, they '
+      'will all be appended to construct the classpath.')
+  parser.add_option(
+      '--incremental',
+      action='store_true',
+      help='Whether to re-use .class files rather than recompiling them '
+           '(when possible).')
+  parser.add_option(
+      '--javac-includes',
+      default='',
+      help='A list of file patterns. If provided, only java files that match '
+      'one of the patterns will be compiled.')
+  parser.add_option(
+      '--jar-excluded-classes',
+      default='',
+      help='List of .class file patterns to exclude from the jar.')
+  parser.add_option(
+      '--chromium-code',
+      type='int',
+      help='Whether code being compiled should be built with stricter '
+      'warnings for chromium code.')
+  parser.add_option(
+      '--use-errorprone-path',
+      help='Use the Errorprone compiler at this path.')
+  parser.add_option('--jar-path', help='Jar output path.')
+  parser.add_option('--stamp', help='Path to touch on success.')
+
+  options, args = parser.parse_args(argv)
+  build_utils.CheckOptions(options, parser, required=('jar_path',))
+
+  bootclasspath = []
+  for arg in options.bootclasspath:
+    bootclasspath += build_utils.ParseGypList(arg)
+  options.bootclasspath = bootclasspath
+
+  classpath = []
+  for arg in options.classpath:
+    classpath += build_utils.ParseGypList(arg)
+  options.classpath = classpath
+
+  java_srcjars = []
+  for arg in options.java_srcjars:
+    java_srcjars += build_utils.ParseGypList(arg)
+  options.java_srcjars = java_srcjars
+
+  if options.src_gendirs:
+    options.src_gendirs = build_utils.ParseGypList(options.src_gendirs)
+
+  options.javac_includes = build_utils.ParseGypList(options.javac_includes)
+  options.jar_excluded_classes = (
+      build_utils.ParseGypList(options.jar_excluded_classes))
+  return options, args
+
+
+def main(argv):
+  colorama.init()
+
+  argv = build_utils.ExpandFileArgs(argv)
+  options, java_files = _ParseOptions(argv)
+
+  if options.src_gendirs:
+    java_files += build_utils.FindInDirectories(options.src_gendirs, '*.java')
+
+  java_files = _FilterJavaFiles(java_files, options.javac_includes)
+
+  javac_cmd = ['javac']
+  if options.use_errorprone_path:
+    javac_cmd = [options.use_errorprone_path] + ERRORPRONE_OPTIONS
+
+  javac_cmd.extend((
+      '-g',
+      # Chromium only allows UTF8 source files.  Being explicit avoids
+      # javac pulling a default encoding from the user's environment.
+      '-encoding', 'UTF-8',
+      '-classpath', ':'.join(options.classpath),
+      # Prevent compiler from compiling .java files not listed as inputs.
+      # See: http://blog.ltgt.net/most-build-tools-misuse-javac/
+      '-sourcepath', ''
+  ))
+
+  if options.bootclasspath:
+    javac_cmd.extend([
+        '-bootclasspath', ':'.join(options.bootclasspath),
+        '-source', '1.7',
+        '-target', '1.7',
+        ])
+
+  if options.chromium_code:
+    javac_cmd.extend(['-Xlint:unchecked', '-Xlint:deprecation'])
+  else:
+    # XDignore.symbol.file makes javac compile against rt.jar instead of
+    # ct.sym. This means that using a java internal package/class will not
+    # trigger a compile warning or error.
+    javac_cmd.extend(['-XDignore.symbol.file'])
+
+  classpath_inputs = options.bootclasspath
+  if options.classpath:
+    if options.classpath[0].endswith('.interface.jar'):
+      classpath_inputs.extend(options.classpath)
+    else:
+      # TODO(agrieve): Remove this .TOC heuristic once GYP is no more.
+      for path in options.classpath:
+        if os.path.exists(path + '.TOC'):
+          classpath_inputs.append(path + '.TOC')
+        else:
+          classpath_inputs.append(path)
+
+  # Compute the list of paths that when changed, we need to rebuild.
+  input_paths = classpath_inputs + options.java_srcjars + java_files
+
+  output_paths = [
+      options.jar_path,
+      options.jar_path.replace('.jar', '.excluded.jar'),
+  ]
+  if options.incremental:
+    output_paths.append(options.jar_path + '.pdb')
+
+  # An escape hatch to be able to check if incremental compiles are causing
+  # problems.
+  force = int(os.environ.get('DISABLE_INCREMENTAL_JAVAC', 0))
+
+  # List python deps in input_strings rather than input_paths since their
+  # contents do not change what gets written to the depfile.
+  build_utils.CallAndWriteDepfileIfStale(
+      lambda changes: _OnStaleMd5(changes, options, javac_cmd, java_files,
+                                  classpath_inputs),
+      options,
+      input_paths=input_paths,
+      input_strings=javac_cmd,
+      output_paths=output_paths,
+      force=force,
+      pass_changes=True)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/jinja_template.py b/build/android/gyp/jinja_template.py
new file mode 100755
index 0000000..7e9624b
--- /dev/null
+++ b/build/android/gyp/jinja_template.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Renders one or more template files using the Jinja template engine."""
+
+import codecs
+import optparse
+import os
+import sys
+
+from util import build_utils
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
+from pylib.constants import host_paths
+
+# Import jinja2 from third_party/jinja2
+sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'))
+import jinja2  # pylint: disable=F0401
+
+
+class RecordingFileSystemLoader(jinja2.FileSystemLoader):
+  '''A FileSystemLoader that stores a list of loaded templates.'''
+  def __init__(self, searchpath):
+    jinja2.FileSystemLoader.__init__(self, searchpath)
+    self.loaded_templates = set()
+
+  def get_source(self, environment, template):
+    contents, filename, uptodate = jinja2.FileSystemLoader.get_source(
+        self, environment, template)
+    self.loaded_templates.add(os.path.relpath(filename))
+    return contents, filename, uptodate
+
+  def get_loaded_templates(self):
+    return list(self.loaded_templates)
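+
+# Minimal usage sketch (paths assumed for illustration): every template the
+# environment loads, including ones pulled in via {% include %}, goes through
+# get_source and is recorded, which main() below relies on for the depfile:
+#   loader = RecordingFileSystemLoader('/abs/src')
+#   env = jinja2.Environment(loader=loader)
+#   env.get_template('templates/foo.xml').render({'channel': 'beta'})
+#   loader.get_loaded_templates()  # template paths, relative to the cwd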
+
+
+def ProcessFile(env, input_filename, loader_base_dir, output_filename,
+                variables):
+  input_rel_path = os.path.relpath(input_filename, loader_base_dir)
+  template = env.get_template(input_rel_path)
+  output = template.render(variables)
+  with codecs.open(output_filename, 'w', 'utf-8') as output_file:
+    output_file.write(output)
+
+
+def ProcessFiles(env, input_filenames, loader_base_dir, inputs_base_dir,
+                 outputs_zip, variables):
+  with build_utils.TempDir() as temp_dir:
+    for input_filename in input_filenames:
+      relpath = os.path.relpath(os.path.abspath(input_filename),
+                                os.path.abspath(inputs_base_dir))
+      if relpath.startswith(os.pardir):
+        raise Exception('input file %s is not contained in inputs base dir %s'
+                        % (input_filename, inputs_base_dir))
+
+      output_filename = os.path.join(temp_dir, relpath)
+      parent_dir = os.path.dirname(output_filename)
+      build_utils.MakeDirectory(parent_dir)
+      ProcessFile(env, input_filename, loader_base_dir, output_filename,
+                  variables)
+
+    build_utils.ZipDir(outputs_zip, temp_dir)
+
+
+def main():
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_option('--inputs', help='The template files to process.')
+  parser.add_option('--output', help='The output file to generate. Valid '
+                    'only if there is a single input.')
+  parser.add_option('--outputs-zip', help='A zip file containing the processed '
+                    'templates. Required if there are multiple inputs.')
+  parser.add_option('--inputs-base-dir', help='A common ancestor directory of '
+                    'the inputs. Each output\'s path in the output zip will '
+                    'match the relative path from INPUTS_BASE_DIR to the '
+                    'input. Required if --outputs-zip is given.'
+  parser.add_option('--loader-base-dir', help='Base path used by the template '
+                    'loader. Must be a common ancestor directory of '
+                    'the inputs. Defaults to DIR_SOURCE_ROOT.',
+                    default=host_paths.DIR_SOURCE_ROOT)
+  parser.add_option('--variables', help='Variables to be made available in the '
+                    'template processing environment, as a GYP list (e.g. '
+                    '--variables "channel=beta mstone=39")', default='')
+  options, args = parser.parse_args()
+
+  build_utils.CheckOptions(options, parser, required=['inputs'])
+  inputs = build_utils.ParseGypList(options.inputs)
+
+  if (options.output is None) == (options.outputs_zip is None):
+    parser.error('Exactly one of --output and --outputs-zip must be given')
+  if options.output and len(inputs) != 1:
+    parser.error('--output cannot be used with multiple inputs')
+  if options.outputs_zip and not options.inputs_base_dir:
+    parser.error('--inputs-base-dir must be given when --outputs-zip is used')
+  if args:
+    parser.error('No positional arguments should be given.')
+
+  variables = {}
+  for v in build_utils.ParseGypList(options.variables):
+    if '=' not in v:
+      parser.error('--variables argument must contain "=": ' + v)
+    name, _, value = v.partition('=')
+    variables[name] = value
+
+  loader = RecordingFileSystemLoader(options.loader_base_dir)
+  env = jinja2.Environment(loader=loader, undefined=jinja2.StrictUndefined,
+                           line_comment_prefix='##')
+  if options.output:
+    ProcessFile(env, inputs[0], options.loader_base_dir, options.output,
+                variables)
+  else:
+    ProcessFiles(env, inputs, options.loader_base_dir, options.inputs_base_dir,
+                 options.outputs_zip, variables)
+
+  if options.depfile:
+    deps = loader.get_loaded_templates() + build_utils.GetPythonDependencies()
+    build_utils.WriteDepfile(options.depfile, deps)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/build/android/gyp/lint.py b/build/android/gyp/lint.py
new file mode 100755
index 0000000..2efe9f8
--- /dev/null
+++ b/build/android/gyp/lint.py
@@ -0,0 +1,321 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs Android's lint tool."""
+
+
+import argparse
+import os
+import re
+import sys
+import traceback
+from xml.dom import minidom
+
+from util import build_utils
+
+_LINT_MD_URL = 'https://chromium.googlesource.com/chromium/src/+/master/build/android/docs/lint.md' # pylint: disable=line-too-long
+_SRC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                         '..', '..', '..'))
+
+
+def _OnStaleMd5(lint_path, config_path, processed_config_path,
+                manifest_path, result_path, product_dir, sources, jar_path,
+                cache_dir, android_sdk_version, resource_dir=None,
+                classpath=None, can_fail_build=False, silent=False):
+  def _RelativizePath(path):
+    """Returns relative path to top-level src dir.
+
+    Args:
+      path: A path relative to cwd.
+    """
+    return os.path.relpath(os.path.abspath(path), _SRC_ROOT)
+
+  def _ProcessConfigFile():
+    if not config_path or not processed_config_path:
+      return
+    if not build_utils.IsTimeStale(processed_config_path, [config_path]):
+      return
+
+    with open(config_path, 'rb') as f:
+      content = f.read().replace(
+          'PRODUCT_DIR', _RelativizePath(product_dir))
+
+    with open(processed_config_path, 'wb') as f:
+      f.write(content)
+
+  def _ProcessResultFile():
+    with open(result_path, 'rb') as f:
+      content = f.read().replace(
+          _RelativizePath(product_dir), 'PRODUCT_DIR')
+
+    with open(result_path, 'wb') as f:
+      f.write(content)
+
+  def _ParseAndShowResultFile():
+    dom = minidom.parse(result_path)
+    issues = dom.getElementsByTagName('issue')
+    if not silent:
+      print >> sys.stderr
+      for issue in issues:
+        issue_id = issue.attributes['id'].value
+        message = issue.attributes['message'].value
+        location_elem = issue.getElementsByTagName('location')[0]
+        path = location_elem.attributes['file'].value
+        line = location_elem.getAttribute('line')
+        if line:
+          error = '%s:%s %s: %s [warning]' % (path, line, message, issue_id)
+        else:
+          # Issues in class files don't have a line number.
+          error = '%s %s: %s [warning]' % (path, message, issue_id)
+        print >> sys.stderr, error.encode('utf-8')
+        for attr in ['errorLine1', 'errorLine2']:
+          error_line = issue.getAttribute(attr)
+          if error_line:
+            print >> sys.stderr, error_line.encode('utf-8')
+    return len(issues)
+
+  with build_utils.TempDir() as temp_dir:
+    _ProcessConfigFile()
+
+    cmd = [
+        _RelativizePath(lint_path), '-Werror', '--exitcode', '--showall',
+        '--xml', _RelativizePath(result_path),
+    ]
+    if jar_path:
+      # --classpath is just for .class files for this one target.
+      cmd.extend(['--classpath', _RelativizePath(jar_path)])
+    if processed_config_path:
+      cmd.extend(['--config', _RelativizePath(processed_config_path)])
+    if resource_dir:
+      cmd.extend(['--resources', _RelativizePath(resource_dir)])
+    if classpath:
+      # --libraries is the classpath (excluding active target).
+      cp = ':'.join(_RelativizePath(p) for p in classpath)
+      cmd.extend(['--libraries', cp])
+
+    # There may be multiple source files with the same basename (but in
+    # different directories). It is difficult to determine what part of the path
+    # corresponds to the java package, so we instead link the source files
+    # into temporary directories (creating a new one whenever there is a name
+    # conflict).
+    src_dirs = []
+    def NewSourceDir():
+      new_dir = os.path.join(temp_dir, str(len(src_dirs)))
+      os.mkdir(new_dir)
+      src_dirs.append(new_dir)
+      return new_dir
+
+    def PathInDir(d, src):
+      return os.path.join(d, os.path.basename(src))
+
+    for src in sources:
+      src_dir = None
+      for d in src_dirs:
+        if not os.path.exists(PathInDir(d, src)):
+          src_dir = d
+          break
+      if not src_dir:
+        src_dir = NewSourceDir()
+        cmd.extend(['--sources', _RelativizePath(src_dir)])
+      os.symlink(os.path.abspath(src), PathInDir(src_dir, src))
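+    # For example (names illustrative): with sources ['a/Foo.java',
+    # 'b/Foo.java'], the first Foo.java is linked into <temp_dir>/0/; the
+    # second basename collides there, so a fresh <temp_dir>/1/ is created and
+    # also passed to lint via --sources.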
+
+    project_dir = NewSourceDir()
+    if android_sdk_version:
+      # Create a dummy project.properties file in a temporary "project"
+      # directory. It is the only way to add the Android SDK to lint's
+      # classpath. A proper classpath is necessary for most source-level
+      # checks.
+      with open(os.path.join(project_dir, 'project.properties'), 'w') \
+          as propfile:
+        print >> propfile, 'target=android-{}'.format(android_sdk_version)
+
+    # Put the manifest in a temporary directory in order to avoid lint detecting
+    # sibling res/ and src/ directories (which should be passed explicitly if
+    # they
+    # are to be included).
+    if manifest_path:
+      os.symlink(os.path.abspath(manifest_path),
+                 PathInDir(project_dir, manifest_path))
+    cmd.append(project_dir)
+
+    if os.path.exists(result_path):
+      os.remove(result_path)
+
+    env = {}
+    stderr_filter = None
+    if cache_dir:
+      env['_JAVA_OPTIONS'] = '-Duser.home=%s' % _RelativizePath(cache_dir)
+      # When _JAVA_OPTIONS is set, java prints to stderr:
+      # Picked up _JAVA_OPTIONS: ...
+      #
+      # We drop all lines that contain _JAVA_OPTIONS from the output
+      stderr_filter = lambda l: re.sub(r'.*_JAVA_OPTIONS.*\n?', '', l)
+
+    try:
+      build_utils.CheckOutput(cmd, cwd=_SRC_ROOT, env=env or None,
+                              stderr_filter=stderr_filter)
+    except build_utils.CalledProcessError:
+      # There is a problem with lint usage
+      if not os.path.exists(result_path):
+        raise
+
+      # Lint sometimes produces almost-empty result files:
+      if os.path.getsize(result_path) < 10:
+        if can_fail_build:
+          raise
+        elif not silent:
+          traceback.print_exc()
+        return
+
+      # There are actual lint issues
+      try:
+        num_issues = _ParseAndShowResultFile()
+      except Exception: # pylint: disable=broad-except
+        if not silent:
+          print 'Lint created unparseable xml file...'
+          print 'File contents:'
+          with open(result_path) as f:
+            print f.read()
+        raise
+
+      _ProcessResultFile()
+      msg = ('\nLint found %d new issues.\n'
+             ' - For full explanation, please refer to %s\n'
+             ' - For more information about lint and how to fix lint issues,'
+             ' please refer to %s\n' %
+             (num_issues,
+              _RelativizePath(result_path),
+              _LINT_MD_URL))
+      if not silent:
+        print >> sys.stderr, msg
+      if can_fail_build:
+        raise Exception('Lint failed.')
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_argument('--lint-path', required=True,
+                      help='Path to lint executable.')
+  parser.add_argument('--product-dir', required=True,
+                      help='Path to product dir.')
+  parser.add_argument('--result-path', required=True,
+                      help='Path to XML lint result file.')
+  parser.add_argument('--cache-dir', required=True,
+                      help='Path to the directory in which the android cache '
+                           'directory tree should be stored.')
+  parser.add_argument('--platform-xml-path', required=True,
+                      help='Path to api-platforms.xml')
+  parser.add_argument('--android-sdk-version',
+                      help='Version (API level) of the Android SDK used for '
+                           'building.')
+  parser.add_argument('--create-cache', action='store_true',
+                      help='Mark the lint cache file as an output rather than '
+                      'an input.')
+  parser.add_argument('--can-fail-build', action='store_true',
+                      help='If set, script will exit with nonzero exit status'
+                           ' if lint errors are present')
+  parser.add_argument('--config-path',
+                      help='Path to lint suppressions file.')
+  parser.add_argument('--enable', action='store_true',
+                      help='Run lint instead of just touching stamp.')
+  parser.add_argument('--jar-path',
+                      help='Jar file containing class files.')
+  parser.add_argument('--java-files',
+                      help='Paths to java files.')
+  parser.add_argument('--manifest-path',
+                      help='Path to AndroidManifest.xml')
+  parser.add_argument('--classpath', default=[], action='append',
+                      help='GYP-list of classpath .jar files')
+  parser.add_argument('--processed-config-path',
+                      help='Path to processed lint suppressions file.')
+  parser.add_argument('--resource-dir',
+                      help='Path to resource dir.')
+  parser.add_argument('--silent', action='store_true',
+                      help='If set, script will not log anything.')
+  parser.add_argument('--src-dirs',
+                      help='Directories containing java files.')
+  parser.add_argument('--stamp',
+                      help='Path to touch on success.')
+
+  args = parser.parse_args(build_utils.ExpandFileArgs(sys.argv[1:]))
+
+  if args.enable:
+    sources = []
+    if args.src_dirs:
+      src_dirs = build_utils.ParseGypList(args.src_dirs)
+      sources = build_utils.FindInDirectories(src_dirs, '*.java')
+    elif args.java_files:
+      sources = build_utils.ParseGypList(args.java_files)
+
+    if args.config_path and not args.processed_config_path:
+      parser.error('--config-path specified without --processed-config-path')
+    elif args.processed_config_path and not args.config_path:
+      parser.error('--processed-config-path specified without --config-path')
+
+    input_paths = [
+        args.lint_path,
+        args.platform_xml_path,
+    ]
+    if args.config_path:
+      input_paths.append(args.config_path)
+    if args.jar_path:
+      input_paths.append(args.jar_path)
+    if args.manifest_path:
+      input_paths.append(args.manifest_path)
+    if args.resource_dir:
+      input_paths.extend(build_utils.FindInDirectory(args.resource_dir, '*'))
+    if sources:
+      input_paths.extend(sources)
+    classpath = []
+    for gyp_list in args.classpath:
+      classpath.extend(build_utils.ParseGypList(gyp_list))
+    input_paths.extend(classpath)
+
+    input_strings = []
+    if args.android_sdk_version:
+      input_strings.append(args.android_sdk_version)
+    if args.processed_config_path:
+      input_strings.append(args.processed_config_path)
+
+    output_paths = [ args.result_path ]
+
+    build_utils.CallAndWriteDepfileIfStale(
+        lambda: _OnStaleMd5(args.lint_path,
+                            args.config_path,
+                            args.processed_config_path,
+                            args.manifest_path, args.result_path,
+                            args.product_dir, sources,
+                            args.jar_path,
+                            args.cache_dir,
+                            args.android_sdk_version,
+                            resource_dir=args.resource_dir,
+                            classpath=classpath,
+                            can_fail_build=args.can_fail_build,
+                            silent=args.silent),
+        args,
+        input_paths=input_paths,
+        input_strings=input_strings,
+        output_paths=output_paths,
+        depfile_deps=classpath)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/gyp/locale_pak_resources.py b/build/android/gyp/locale_pak_resources.py
new file mode 100755
index 0000000..84c4a37
--- /dev/null
+++ b/build/android/gyp/locale_pak_resources.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Creates a resources.zip for locale .pak files.
+
+Places the locale .pak files into appropriate resource configs
+(e.g. en-GB.pak -> res/raw-en/en_gb.lpak). Also generates a locale_paks
+TypedArray so that resource files can be enumerated at runtime.
+"""
+
+import collections
+import optparse
+import os
+import sys
+import zipfile
+
+from util import build_utils
+
+
+# This should stay in sync with:
+# base/android/java/src/org/chromium/base/LocaleUtils.java
+_CHROME_TO_ANDROID_LOCALE_MAP = {
+    'he': 'iw',
+    'id': 'in',
+    'fil': 'tl',
+}
+
+
+def ToResourceFileName(name):
+  """Returns the resource-compatible file name for the given file."""
+  # Resources file names must consist of [a-z0-9_.].
+  # Changes extension to .lpak so that compression can be toggled separately for
+  # locale pak files vs other pak files.
+  return name.replace('-', '_').replace('.pak', '.lpak').lower()
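+  # For illustration (hypothetical names):
+  #   ToResourceFileName('en-GB.pak') -> 'en_gb.lpak'
+  #   ToResourceFileName('fil.pak')   -> 'fil.lpak'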
+
+
+def CreateLocalePaksXml(names):
+  """Creates the contents for the locale-paks.xml files."""
+  VALUES_FILE_TEMPLATE = '''<?xml version="1.0" encoding="utf-8"?>
+<resources>
+  <array name="locale_paks">%s
+  </array>
+</resources>
+'''
+  VALUES_ITEM_TEMPLATE = '''
+    <item>@raw/%s</item>'''
+
+  res_names = (os.path.splitext(name)[0] for name in names)
+  items = ''.join((VALUES_ITEM_TEMPLATE % name for name in res_names))
+  return VALUES_FILE_TEMPLATE % items
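+  # For illustration, CreateLocalePaksXml(['en_gb.lpak']) renders (using the
+  # templates above):
+  #   <?xml version="1.0" encoding="utf-8"?>
+  #   <resources>
+  #     <array name="locale_paks">
+  #       <item>@raw/en_gb</item>
+  #     </array>
+  #   </resources>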
+
+
+def ComputeMappings(sources):
+  """Computes the mappings of sources -> resources.
+
+  Returns a tuple of:
+    - mappings: List of (src, dest) paths
+    - lang_to_locale_map: Map of language -> list of resource names
+      e.g. "en" -> ["en_gb.lpak"]
+  """
+  lang_to_locale_map = collections.defaultdict(list)
+  mappings = []
+  for src_path in sources:
+    basename = os.path.basename(src_path)
+    name = os.path.splitext(basename)[0]
+    res_name = ToResourceFileName(basename)
+    if name == 'en-US':
+      dest_dir = 'raw'
+    else:
+      # Chrome uses different region mapping logic from Android, so include
+      # all regions for each language.
+      android_locale = _CHROME_TO_ANDROID_LOCALE_MAP.get(name, name)
+      lang = android_locale[0:2]
+      dest_dir = 'raw-' + lang
+      lang_to_locale_map[lang].append(res_name)
+    mappings.append((src_path, os.path.join(dest_dir, res_name)))
+  return mappings, lang_to_locale_map
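+  # For illustration (hypothetical paths),
+  # ComputeMappings(['out/en-US.pak', 'out/en-GB.pak']) returns:
+  #   mappings: [('out/en-US.pak', 'raw/en_us.lpak'),
+  #              ('out/en-GB.pak', 'raw-en/en_gb.lpak')]
+  #   lang_to_locale_map: {'en': ['en_gb.lpak']}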
+
+
+def main():
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_option('--locale-paks', help='List of files for res/raw-LOCALE')
+  parser.add_option('--resources-zip', help='Path to output resources.zip')
+  parser.add_option('--print-languages',
+      action='store_true',
+      help='Print out the list of languages that cover the given locale paks '
+           '(using Android\'s language codes)')
+
+  options, _ = parser.parse_args()
+  build_utils.CheckOptions(options, parser,
+                           required=['locale_paks'])
+
+  sources = build_utils.ParseGypList(options.locale_paks)
+
+  if options.depfile:
+    deps = sources + build_utils.GetPythonDependencies()
+    build_utils.WriteDepfile(options.depfile, deps)
+
+  mappings, lang_to_locale_map = ComputeMappings(sources)
+  if options.print_languages:
+    print '\n'.join(sorted(lang_to_locale_map))
+
+  if options.resources_zip:
+    with zipfile.ZipFile(options.resources_zip, 'w', zipfile.ZIP_STORED) as out:
+      for mapping in mappings:
+        out.write(mapping[0], mapping[1])
+
+      # Create TypedArray resources so ResourceExtractor can enumerate files.
+      def WriteValuesFile(lang, names):
+        dest_dir = 'values'
+        if lang:
+          dest_dir += '-' + lang
+        # Always extract en-US.lpak since it's the fallback.
+        xml = CreateLocalePaksXml(names + ['en_us.lpak'])
+        out.writestr(os.path.join(dest_dir, 'locale-paks.xml'), xml)
+
+      for lang, names in lang_to_locale_map.iteritems():
+        WriteValuesFile(lang, names)
+      WriteValuesFile(None, [])
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/gyp/main_dex_list.py b/build/android/gyp/main_dex_list.py
new file mode 100755
index 0000000..7388f4a
--- /dev/null
+++ b/build/android/gyp/main_dex_list.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import json
+import os
+import sys
+import tempfile
+
+from util import build_utils
+
+sys.path.append(os.path.abspath(os.path.join(
+    os.path.dirname(__file__), os.pardir)))
+from pylib import constants
+
+
+def main(args):
+  parser = argparse.ArgumentParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_argument('--android-sdk-tools', required=True,
+                      help='Android sdk build tools directory.')
+  parser.add_argument('--main-dex-rules-path', action='append', default=[],
+                      dest='main_dex_rules_paths',
+                      help='A file containing a list of proguard rules to use '
+                           'in determining the classes to include in the '
+                           'main dex.')
+  parser.add_argument('--main-dex-list-path', required=True,
+                      help='The main dex list file to generate.')
+  parser.add_argument('--enabled-configurations',
+                      help='The build configurations for which a main dex list'
+                           ' should be generated.')
+  parser.add_argument('--configuration-name',
+                      help='The current build configuration.')
+  parser.add_argument('--multidex-configuration-path',
+                      help='A JSON file containing multidex build '
+                           'configuration.')
+  parser.add_argument('--inputs',
+                      help='JARs for which a main dex list should be '
+                           'generated.')
+  parser.add_argument('paths', nargs='*', default=[],
+                      help='JARs for which a main dex list should be '
+                           'generated.')
+
+  args = parser.parse_args(build_utils.ExpandFileArgs(args))
+
+  if args.multidex_configuration_path:
+    with open(args.multidex_configuration_path) as multidex_config_file:
+      multidex_config = json.loads(multidex_config_file.read())
+
+    if not multidex_config.get('enabled', False):
+      return 0
+
+  if args.inputs:
+    args.paths.extend(build_utils.ParseGypList(args.inputs))
+
+  shrinked_android_jar = os.path.abspath(
+      os.path.join(args.android_sdk_tools, 'lib', 'shrinkedAndroid.jar'))
+  dx_jar = os.path.abspath(
+      os.path.join(args.android_sdk_tools, 'lib', 'dx.jar'))
+  rules_file = os.path.abspath(
+      os.path.join(args.android_sdk_tools, 'mainDexClasses.rules'))
+
+  proguard_cmd = [
+    constants.PROGUARD_SCRIPT_PATH,
+    '-forceprocessing',
+    '-dontwarn', '-dontoptimize', '-dontobfuscate', '-dontpreverify',
+    '-libraryjars', shrinked_android_jar,
+    '-include', rules_file,
+  ]
+  for m in args.main_dex_rules_paths:
+    proguard_cmd.extend(['-include', m])
+
+  main_dex_list_cmd = [
+    'java', '-cp', dx_jar,
+    'com.android.multidex.MainDexListBuilder',
+  ]
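+  # Both commands are completed in _OnStaleMd5 below: proguard first shrinks
+  # the input jars into a temporary jar, then MainDexListBuilder runs over it.
+  # For illustration (generic paths):
+  #   java -cp dx.jar com.android.multidex.MainDexListBuilder temp.jar paths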
+
+  input_paths = list(args.paths)
+  input_paths += [
+    shrinked_android_jar,
+    dx_jar,
+    rules_file,
+  ]
+  input_paths += args.main_dex_rules_paths
+
+  input_strings = [
+    proguard_cmd,
+    main_dex_list_cmd,
+  ]
+
+  output_paths = [
+    args.main_dex_list_path,
+  ]
+
+  build_utils.CallAndWriteDepfileIfStale(
+      lambda: _OnStaleMd5(proguard_cmd, main_dex_list_cmd, args.paths,
+                          args.main_dex_list_path),
+      args,
+      input_paths=input_paths,
+      input_strings=input_strings,
+      output_paths=output_paths)
+
+  return 0
+
+
+def _OnStaleMd5(proguard_cmd, main_dex_list_cmd, paths, main_dex_list_path):
+  paths_arg = ':'.join(paths)
+  main_dex_list = ''
+  try:
+    with tempfile.NamedTemporaryFile(suffix='.jar') as temp_jar:
+      proguard_cmd += [
+        '-injars', paths_arg,
+        '-outjars', temp_jar.name
+      ]
+      build_utils.CheckOutput(proguard_cmd, print_stderr=False)
+
+      main_dex_list_cmd += [
+        temp_jar.name, paths_arg
+      ]
+      main_dex_list = build_utils.CheckOutput(main_dex_list_cmd)
+  except build_utils.CalledProcessError as e:
+    if 'output jar is empty' in e.output:
+      pass
+    elif "input doesn't contain any classes" in e.output:
+      pass
+    else:
+      raise
+
+  with open(main_dex_list_path, 'w') as main_dex_list_file:
+    main_dex_list_file.write(main_dex_list)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
+
diff --git a/build/android/gyp/pack_relocations.py b/build/android/gyp/pack_relocations.py
new file mode 100755
index 0000000..1a4824a
--- /dev/null
+++ b/build/android/gyp/pack_relocations.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Pack relocations in a library (or copy unchanged).
+
+If --enable-packing is 1 and --configuration-name is 'Release', invoke the
+relocation_packer tool to pack the .rel.dyn or .rela.dyn section in the given
+library files.  This step is inserted after the libraries are stripped.
+
+If --enable-packing is 0, the script copies files verbatim, with no
+attempt to pack relocations.
+
+Any library listed in --exclude-packing-list is also copied verbatim,
+irrespective of any --enable-packing setting.  Typically this would be
+'libchromium_android_linker.so'.
+"""
+
+import optparse
+import os
+import shutil
+import sys
+import tempfile
+
+from util import build_utils
+
+def PackLibraryRelocations(android_pack_relocations, library_path, output_path):
+  shutil.copy(library_path, output_path)
+  pack_command = [android_pack_relocations, output_path]
+  build_utils.CheckOutput(pack_command)
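+  # Note: relocation_packer rewrites the copy at output_path in place, so the
+  # stripped input library is left untouched.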
+
+
+def CopyLibraryUnchanged(library_path, output_path):
+  shutil.copy(library_path, output_path)
+
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_option('--clear-dir', action='store_true',
+                    help='If set, the destination directory will be deleted '
+                    'before copying files to it. This is highly recommended to '
+                    'ensure that no stale files are left in the directory.')
+
+  parser.add_option('--configuration-name',
+      default='Release',
+      help='Gyp configuration name (i.e. Debug, Release)')
+  parser.add_option('--enable-packing',
+      choices=['0', '1'],
+      help=('Pack relocations if 1 and configuration name is \'Release\','
+            ' otherwise plain file copy'))
+  parser.add_option('--exclude-packing-list',
+      default='',
+      help='Names of any libraries explicitly not packed')
+  parser.add_option('--android-pack-relocations',
+      help='Path to the relocations packer binary')
+  parser.add_option('--stripped-libraries-dir',
+      help='Directory for stripped libraries')
+  parser.add_option('--packed-libraries-dir',
+      help='Directory for packed libraries')
+  parser.add_option('--libraries', action='append',
+      help='List of libraries')
+  parser.add_option('--stamp', help='Path to touch on success')
+  parser.add_option('--filelistjson',
+                    help='Output path of filelist.json to write')
+
+  options, _ = parser.parse_args(args)
+  enable_packing = (options.enable_packing == '1' and
+                    options.configuration_name == 'Release')
+  exclude_packing_set = set(build_utils.ParseGypList(
+      options.exclude_packing_list))
+
+  libraries = []
+  for libs_arg in options.libraries:
+    libraries += build_utils.ParseGypList(libs_arg)
+
+  if options.clear_dir:
+    build_utils.DeleteDirectory(options.packed_libraries_dir)
+
+  build_utils.MakeDirectory(options.packed_libraries_dir)
+
+  output_paths = []
+  for library in libraries:
+    library_path = os.path.join(options.stripped_libraries_dir, library)
+    output_path = os.path.join(
+        options.packed_libraries_dir, os.path.basename(library))
+    output_paths.append(output_path)
+
+    if enable_packing and library not in exclude_packing_set:
+      PackLibraryRelocations(options.android_pack_relocations,
+                             library_path,
+                             output_path)
+    else:
+      CopyLibraryUnchanged(library_path, output_path)
+
+  if options.filelistjson:
+    build_utils.WriteJson({ 'files': output_paths }, options.filelistjson)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        libraries + build_utils.GetPythonDependencies())
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/package_resources.py b/build/android/gyp/package_resources.py
new file mode 100755
index 0000000..08a2537
--- /dev/null
+++ b/build/android/gyp/package_resources.py
@@ -0,0 +1,325 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=C0301
+"""Package resources into an apk.
+
+See https://android.googlesource.com/platform/tools/base/+/master/legacy/ant-tasks/src/main/java/com/android/ant/AaptExecTask.java
+and
+https://android.googlesource.com/platform/sdk/+/master/files/ant/build.xml
+"""
+# pylint: enable=C0301
+
+import optparse
+import os
+import re
+import shutil
+import sys
+import zipfile
+
+from util import build_utils
+
+
+# List is generated from the chrome_apk.apk_intermediates.ap_ via:
+#     unzip -l $FILE_AP_ | cut -c31- | grep res/draw | cut -d'/' -f 2 | sort \
+#     | uniq | grep -- -tvdpi- | cut -c10-
+# and then manually sorted.
+# Note that we can't just do a cross-product of dimensions because the filenames
+# become too big and aapt fails to create the files.
+# This leaves all default drawables (mdpi) in the main apk. Android gets upset
+# though if any drawables are missing from the default drawables/ directory.
+DENSITY_SPLITS = {
+    'hdpi': (
+        'hdpi-v4', # Order matters for output file names.
+        'ldrtl-hdpi-v4',
+        'sw600dp-hdpi-v13',
+        'ldrtl-hdpi-v17',
+        'ldrtl-sw600dp-hdpi-v17',
+        'hdpi-v21',
+    ),
+    'xhdpi': (
+        'xhdpi-v4',
+        'ldrtl-xhdpi-v4',
+        'sw600dp-xhdpi-v13',
+        'ldrtl-xhdpi-v17',
+        'ldrtl-sw600dp-xhdpi-v17',
+        'xhdpi-v21',
+    ),
+    'xxhdpi': (
+        'xxhdpi-v4',
+        'ldrtl-xxhdpi-v4',
+        'sw600dp-xxhdpi-v13',
+        'ldrtl-xxhdpi-v17',
+        'ldrtl-sw600dp-xxhdpi-v17',
+        'xxhdpi-v21',
+    ),
+    'xxxhdpi': (
+        'xxxhdpi-v4',
+        'ldrtl-xxxhdpi-v4',
+        'sw600dp-xxxhdpi-v13',
+        'ldrtl-xxxhdpi-v17',
+        'ldrtl-sw600dp-xxxhdpi-v17',
+        'xxxhdpi-v21',
+    ),
+    'tvdpi': (
+        'tvdpi-v4',
+        'sw600dp-tvdpi-v13',
+        'ldrtl-sw600dp-tvdpi-v17',
+    ),
+}
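+# For illustration, each density's config tuple is joined into one aapt
+# argument in _ConstructMostAaptArgs below, e.g.:
+#   --split tvdpi-v4,sw600dp-tvdpi-v13,ldrtl-sw600dp-tvdpi-v17
+# which makes aapt emit a split apk alongside the main apk.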
+
+
+def _ParseArgs(args):
+  """Parses command line options.
+
+  Returns:
+    An options object as from optparse.OptionsParser.parse_args()
+  """
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_option('--android-sdk-jar',
+                    help='path to the Android SDK jar.')
+  parser.add_option('--aapt-path',
+                    help='path to the Android aapt tool')
+
+  parser.add_option('--configuration-name',
+                    help='Gyp\'s configuration name (Debug or Release).')
+
+  parser.add_option('--android-manifest', help='AndroidManifest.xml path')
+  parser.add_option('--version-code', help='Version code for apk.')
+  parser.add_option('--version-name', help='Version name for apk.')
+  parser.add_option(
+      '--shared-resources',
+      action='store_true',
+      help='Make a resource package that can be loaded by a different '
+      'application at runtime to access the package\'s resources.')
+  parser.add_option(
+      '--app-as-shared-lib',
+      action='store_true',
+      help='Make a resource package that can be loaded as shared library')
+  parser.add_option('--resource-zips',
+                    default='[]',
+                    help='zip files containing resources to be packaged')
+  parser.add_option('--asset-dir',
+                    help='directories containing assets to be packaged')
+  parser.add_option('--no-compress', help='disables compression for the '
+                    'given comma separated list of extensions')
+  parser.add_option(
+      '--create-density-splits',
+      action='store_true',
+      help='Enables density splits')
+  parser.add_option('--language-splits',
+                    default='[]',
+                    help='GYP list of languages to create splits for')
+
+  parser.add_option('--apk-path',
+                    help='Path to output (partial) apk.')
+
+  options, positional_args = parser.parse_args(args)
+
+  if positional_args:
+    parser.error('No positional arguments should be given.')
+
+  # Check that required options have been provided.
+  required_options = ('android_sdk_jar', 'aapt_path', 'configuration_name',
+                      'android_manifest', 'version_code', 'version_name',
+                      'apk_path')
+
+  build_utils.CheckOptions(options, parser, required=required_options)
+
+  options.resource_zips = build_utils.ParseGypList(options.resource_zips)
+  options.language_splits = build_utils.ParseGypList(options.language_splits)
+  return options
+
+
+def MoveImagesToNonMdpiFolders(res_root):
+  """Move images from drawable-*-mdpi-* folders to drawable-* folders.
+
+  Why? http://crbug.com/289843
+  """
+  for src_dir_name in os.listdir(res_root):
+    src_components = src_dir_name.split('-')
+    if src_components[0] != 'drawable' or 'mdpi' not in src_components:
+      continue
+    src_dir = os.path.join(res_root, src_dir_name)
+    if not os.path.isdir(src_dir):
+      continue
+    dst_components = [c for c in src_components if c != 'mdpi']
+    assert dst_components != src_components
+    dst_dir_name = '-'.join(dst_components)
+    dst_dir = os.path.join(res_root, dst_dir_name)
+    build_utils.MakeDirectory(dst_dir)
+    for src_file_name in os.listdir(src_dir):
+      if not src_file_name.endswith('.png'):
+        continue
+      src_file = os.path.join(src_dir, src_file_name)
+      dst_file = os.path.join(dst_dir, src_file_name)
+      assert not os.path.lexists(dst_file)
+      shutil.move(src_file, dst_file)
+
+
+def PackageArgsForExtractedZip(d):
+  """Returns the aapt args for an extracted resources zip.
+
+  A resources zip either contains the resources for a single target or for
+  multiple targets. If it is multiple targets merged into one, the actual
+  resource directories will be contained in the subdirectories 0, 1, 2, ...
+  """
+  subdirs = [os.path.join(d, s) for s in os.listdir(d)]
+  subdirs = [s for s in subdirs if os.path.isdir(s)]
+  is_multi = '0' in [os.path.basename(s) for s in subdirs]
+  if is_multi:
+    res_dirs = sorted(subdirs, key=lambda p : int(os.path.basename(p)))
+  else:
+    res_dirs = [d]
+  package_command = []
+  for d in res_dirs:
+    MoveImagesToNonMdpiFolders(d)
+    package_command += ['-S', d]
+  return package_command
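+  # For illustration: if d contains subdirectories '0' and '1', this returns
+  # ['-S', d + '/0', '-S', d + '/1']; otherwise it returns ['-S', d].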
+
+
+def _GenerateDensitySplitPaths(apk_path):
+  for density, config in DENSITY_SPLITS.iteritems():
+    src_path = '%s_%s' % (apk_path, '_'.join(config))
+    dst_path = '%s_%s' % (apk_path, density)
+    yield src_path, dst_path
+
+
+def _GenerateLanguageSplitOutputPaths(apk_path, languages):
+  for lang in languages:
+    yield '%s_%s' % (apk_path, lang)
+
+
+def RenameDensitySplits(apk_path):
+  """Renames all density splits to have shorter / predictable names."""
+  for src_path, dst_path in _GenerateDensitySplitPaths(apk_path):
+    shutil.move(src_path, dst_path)
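+  # For illustration (hypothetical apk_path 'out/Chrome.apk'), the tvdpi split
+  # 'out/Chrome.apk_tvdpi-v4_sw600dp-tvdpi-v13_ldrtl-sw600dp-tvdpi-v17'
+  # is renamed to 'out/Chrome.apk_tvdpi'.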
+
+
+def CheckForMissedConfigs(apk_path, check_density, languages):
+  """Raises an exception if apk_path contains any unexpected configs."""
+  triggers = []
+  if check_density:
+    triggers.extend(re.compile('-%s' % density) for density in DENSITY_SPLITS)
+  if languages:
+    triggers.extend(re.compile(r'-%s\b' % lang) for lang in languages)
+  with zipfile.ZipFile(apk_path) as main_apk_zip:
+    for name in main_apk_zip.namelist():
+      for trigger in triggers:
+        if trigger.search(name) and 'mipmap-' not in name:
+          raise Exception(('Found config in main apk that should have been ' +
+                           'put into a split: %s\nYou need to update ' +
+                           'package_resources.py to include this new ' +
+                           'config (trigger=%s)') % (name, trigger.pattern))
+
+
+def _ConstructMostAaptArgs(options):
+  package_command = [
+      options.aapt_path,
+      'package',
+      '--version-code', options.version_code,
+      '--version-name', options.version_name,
+      '-M', options.android_manifest,
+      '--no-crunch',
+      '-f',
+      '--auto-add-overlay',
+      '--no-version-vectors',
+      '-I', options.android_sdk_jar,
+      '-F', options.apk_path,
+      '--ignore-assets', build_utils.AAPT_IGNORE_PATTERN,
+  ]
+
+  if options.no_compress:
+    for ext in options.no_compress.split(','):
+      package_command += ['-0', ext]
+
+  if options.shared_resources:
+    package_command.append('--shared-lib')
+
+  if options.app_as_shared_lib:
+    package_command.append('--app-as-shared-lib')
+
+  if options.asset_dir and os.path.exists(options.asset_dir):
+    package_command += ['-A', options.asset_dir]
+
+  if options.create_density_splits:
+    for config in DENSITY_SPLITS.itervalues():
+      package_command.extend(('--split', ','.join(config)))
+
+  if options.language_splits:
+    for lang in options.language_splits:
+      package_command.extend(('--split', lang))
+
+  if 'Debug' in options.configuration_name:
+    package_command += ['--debug-mode']
+
+  return package_command
+
+
+def _OnStaleMd5(package_command, options):
+  with build_utils.TempDir() as temp_dir:
+    if options.resource_zips:
+      dep_zips = options.resource_zips
+      for z in dep_zips:
+        subdir = os.path.join(temp_dir, os.path.basename(z))
+        if os.path.exists(subdir):
+          raise Exception('Resource zip name conflict: ' + os.path.basename(z))
+        build_utils.ExtractAll(z, path=subdir)
+        package_command += PackageArgsForExtractedZip(subdir)
+
+    build_utils.CheckOutput(
+        package_command, print_stdout=False, print_stderr=False)
+
+    if options.create_density_splits or options.language_splits:
+      CheckForMissedConfigs(options.apk_path, options.create_density_splits,
+                            options.language_splits)
+
+    if options.create_density_splits:
+      RenameDensitySplits(options.apk_path)
+
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+  options = _ParseArgs(args)
+
+  package_command = _ConstructMostAaptArgs(options)
+
+  output_paths = [ options.apk_path ]
+
+  if options.create_density_splits:
+    for _, dst_path in _GenerateDensitySplitPaths(options.apk_path):
+      output_paths.append(dst_path)
+  output_paths.extend(
+      _GenerateLanguageSplitOutputPaths(options.apk_path,
+                                        options.language_splits))
+
+  input_paths = [ options.android_manifest ] + options.resource_zips
+
+  input_strings = []
+  input_strings.extend(package_command)
+
+  # md5_check.py intentionally does not include file paths in its md5, so to
+  # repackage resources when an asset's name changes, we add the asset paths
+  # to input_strings. The paths are stable from build to build as long as the
+  # assets themselves do not change.
+  if options.asset_dir and os.path.exists(options.asset_dir):
+    asset_paths = []
+    for root, _, filenames in os.walk(options.asset_dir):
+      asset_paths.extend(os.path.join(root, f) for f in filenames)
+    input_paths.extend(asset_paths)
+    input_strings.extend(sorted(asset_paths))
+
+  build_utils.CallAndWriteDepfileIfStale(
+      lambda: _OnStaleMd5(package_command, options),
+      options,
+      input_paths=input_paths,
+      input_strings=input_strings,
+      output_paths=output_paths)
+
+
+if __name__ == '__main__':
+  main(sys.argv[1:])
diff --git a/build/android/gyp/process_resources.py b/build/android/gyp/process_resources.py
new file mode 100755
index 0000000..f8971aa
--- /dev/null
+++ b/build/android/gyp/process_resources.py
@@ -0,0 +1,503 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Process Android resources to generate R.java, and prepare for packaging.
+
+This will crunch images and generate v14 compatible resources
+(see generate_v14_compatible_resources.py).
+"""
+
+import codecs
+import collections
+import optparse
+import os
+import re
+import shutil
+import sys
+
+import generate_v14_compatible_resources
+
+from util import build_utils
+
+# Import jinja2 from third_party/jinja2
+sys.path.insert(1,
+    os.path.join(os.path.dirname(__file__), '../../../third_party'))
+from jinja2 import Template # pylint: disable=F0401
+
+
+# Represents a line from a R.txt file.
+TextSymbolsEntry = collections.namedtuple('RTextEntry',
+    ('java_type', 'resource_type', 'name', 'value'))
+
+
+def _ParseArgs(args):
+  """Parses command line options.
+
+  Returns:
+    An options object as from optparse.OptionsParser.parse_args()
+  """
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_option('--android-sdk-jar',
+                    help='the path to android jar file.')
+  parser.add_option('--aapt-path',
+                    help='path to the Android aapt tool')
+  parser.add_option('--non-constant-id', action='store_true')
+
+  parser.add_option('--android-manifest', help='AndroidManifest.xml path')
+  parser.add_option('--custom-package', help='Java package for R.java')
+  parser.add_option(
+      '--shared-resources',
+      action='store_true',
+      help='Make a resource package that can be loaded by a different '
+      'application at runtime to access the package\'s resources.')
+  parser.add_option(
+      '--app-as-shared-lib',
+      action='store_true',
+      help='Make a resource package that can be loaded as shared library.')
+
+  parser.add_option('--resource-dirs',
+                    help='Directories containing resources of this target.')
+  parser.add_option('--dependencies-res-zips',
+                    help='Resources from dependents.')
+
+  parser.add_option('--resource-zip-out',
+                    help='Path for output zipped resources.')
+
+  parser.add_option('--R-dir',
+                    help='directory to hold generated R.java.')
+  parser.add_option('--srcjar-out',
+                    help='Path to srcjar to contain generated R.java.')
+  parser.add_option('--r-text-out',
+                    help='Path to store the R.txt file generated by aapt.')
+
+  parser.add_option('--proguard-file',
+                    help='Path to proguard.txt generated file')
+
+  parser.add_option(
+      '--v14-skip',
+      action="store_true",
+      help='Do not generate nor verify v14 resources')
+
+  parser.add_option(
+      '--extra-res-packages',
+      help='Additional package names to generate R.java files for')
+  parser.add_option(
+      '--extra-r-text-files',
+      help='For each additional package, the R.txt file should contain a '
+      'list of resources to be included in the R.java file in the format '
+      'generated by aapt')
+  parser.add_option(
+      '--include-all-resources',
+      action='store_true',
+      help='Include every resource ID in every generated R.java file '
+      '(ignoring R.txt).')
+
+  parser.add_option(
+      '--all-resources-zip-out',
+      help='Path for output of all resources. This includes resources in '
+      'dependencies.')
+
+  parser.add_option('--stamp', help='File to touch on success')
+
+  options, positional_args = parser.parse_args(args)
+
+  if positional_args:
+    parser.error('No positional arguments should be given.')
+
+  # Check that required options have been provided.
+  required_options = (
+      'android_sdk_jar',
+      'aapt_path',
+      'android_manifest',
+      'dependencies_res_zips',
+      'resource_dirs',
+      'resource_zip_out',
+      )
+  build_utils.CheckOptions(options, parser, required=required_options)
+
+  if (options.R_dir is None) == (options.srcjar_out is None):
+    raise Exception('Exactly one of --R-dir or --srcjar-out must be specified.')
+
+  options.resource_dirs = build_utils.ParseGypList(options.resource_dirs)
+  options.dependencies_res_zips = (
+      build_utils.ParseGypList(options.dependencies_res_zips))
+
+  # Don't use [] as default value since some scripts explicitly pass "".
+  if options.extra_res_packages:
+    options.extra_res_packages = (
+        build_utils.ParseGypList(options.extra_res_packages))
+  else:
+    options.extra_res_packages = []
+
+  if options.extra_r_text_files:
+    options.extra_r_text_files = (
+        build_utils.ParseGypList(options.extra_r_text_files))
+  else:
+    options.extra_r_text_files = []
+
+  return options
+
+
+def CreateExtraRJavaFiles(
+      r_dir, extra_packages, extra_r_text_files, shared_resources, include_all):
+  if include_all:
+    java_files = build_utils.FindInDirectory(r_dir, "R.java")
+    if len(java_files) != 1:
+      return
+    r_java_file = java_files[0]
+    r_java_contents = codecs.open(r_java_file, encoding='utf-8').read()
+
+    for package in extra_packages:
+      package_r_java_dir = os.path.join(r_dir, *package.split('.'))
+      build_utils.MakeDirectory(package_r_java_dir)
+      package_r_java_path = os.path.join(package_r_java_dir, 'R.java')
+      new_r_java = re.sub(r'package [.\w]*;', u'package %s;' % package,
+                          r_java_contents)
+      codecs.open(package_r_java_path, 'w', encoding='utf-8').write(new_r_java)
+  else:
+    if len(extra_packages) != len(extra_r_text_files):
+      raise Exception('Need one R.txt file per extra package')
+
+    r_txt_file = os.path.join(r_dir, 'R.txt')
+    if not os.path.exists(r_txt_file):
+      return
+
+    # Map of (resource_type, name) -> Entry.
+    # Contains the correct values for resources.
+    all_resources = {}
+    for entry in _ParseTextSymbolsFile(r_txt_file):
+      all_resources[(entry.resource_type, entry.name)] = entry
+
+    # Map of package_name->resource_type->entry
+    resources_by_package = (
+        collections.defaultdict(lambda: collections.defaultdict(list)))
+    # Build the R.java files using each package's R.txt file, but replacing
+    # each entry's placeholder value with correct values from all_resources.
+    for package, r_text_file in zip(extra_packages, extra_r_text_files):
+      if not os.path.exists(r_text_file):
+        continue
+      if package in resources_by_package:
+        raise Exception(('Package name "%s" appeared twice. All '
+                         'android_resources() targets must use unique package '
+                         'names, or no package name at all.') % package)
+      resources_by_type = resources_by_package[package]
+      # The sub-R.txt files have the wrong values at this point. Read them to
+      # figure out which entries belong to them, but use the values from the
+      # main R.txt file.
+      for entry in _ParseTextSymbolsFile(r_text_file):
+        entry = all_resources[(entry.resource_type, entry.name)]
+        resources_by_type[entry.resource_type].append(entry)
+
+    for package, resources_by_type in resources_by_package.iteritems():
+      package_r_java_dir = os.path.join(r_dir, *package.split('.'))
+      build_utils.MakeDirectory(package_r_java_dir)
+      package_r_java_path = os.path.join(package_r_java_dir, 'R.java')
+      java_file_contents = _CreateExtraRJavaFile(
+          package, resources_by_type, shared_resources)
+      with open(package_r_java_path, 'w') as f:
+        f.write(java_file_contents)
+
+
+def _ParseTextSymbolsFile(path):
+  """Given an R.txt file, returns a list of TextSymbolsEntry."""
+  ret = []
+  with open(path) as f:
+    for line in f:
+      m = re.match(r'(int(?:\[\])?) (\w+) (\w+) (.+)$', line)
+      if not m:
+        raise Exception('Unexpected line in R.txt: %s' % line)
+      java_type, resource_type, name, value = m.groups()
+      ret.append(TextSymbolsEntry(java_type, resource_type, name, value))
+  return ret
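+  # For illustration, an R.txt line such as:
+  #   int drawable icon 0x7f020000
+  # parses to TextSymbolsEntry('int', 'drawable', 'icon', '0x7f020000').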
+
+
+def _CreateExtraRJavaFile(package, resources_by_type, shared_resources):
+  """Generates the contents of a R.java file."""
+  template = Template("""/* AUTO-GENERATED FILE.  DO NOT MODIFY. */
+
+package {{ package }};
+
+public final class R {
+    {% for resource_type in resources %}
+    public static final class {{ resource_type }} {
+        {% for e in resources[resource_type] %}
+        {% if shared_resources %}
+        public static {{ e.java_type }} {{ e.name }} = {{ e.value }};
+        {% else %}
+        public static final {{ e.java_type }} {{ e.name }} = {{ e.value }};
+        {% endif %}
+        {% endfor %}
+    }
+    {% endfor %}
+    {% if shared_resources %}
+    public static void onResourcesLoaded(int packageId) {
+        {% for resource_type in resources %}
+        {% for e in resources[resource_type] %}
+        {% if e.java_type == 'int[]' %}
+        for(int i = 0; i < {{ e.resource_type }}.{{ e.name }}.length; ++i) {
+            {{ e.resource_type }}.{{ e.name }}[i] =
+                    ({{ e.resource_type }}.{{ e.name }}[i] & 0x00ffffff)
+                    | (packageId << 24);
+        }
+        {% else %}
+        {{ e.resource_type }}.{{ e.name }} =
+                ({{ e.resource_type }}.{{ e.name }} & 0x00ffffff)
+                | (packageId << 24);
+        {% endif %}
+        {% endfor %}
+        {% endfor %}
+    }
+    {% endif %}
+}
+""", trim_blocks=True, lstrip_blocks=True)
+
+  return template.render(package=package, resources=resources_by_type,
+                         shared_resources=shared_resources)
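+# For illustration, a single non-shared entry
+# TextSymbolsEntry('int', 'string', 'hello', '0x7f050000') renders as:
+#   public static final class string {
+#       public static final int hello = 0x7f050000;
+#   }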
+
+
+def CrunchDirectory(aapt, input_dir, output_dir):
+  """Crunches the images in input_dir and its subdirectories into output_dir.
+
+  If an image is already optimized, crunching often increases image size. In
+  this case, the crunched image is overwritten with the original image.
+  """
+  aapt_cmd = [aapt,
+              'crunch',
+              '-C', output_dir,
+              '-S', input_dir,
+              '--ignore-assets', build_utils.AAPT_IGNORE_PATTERN]
+  build_utils.CheckOutput(aapt_cmd, stderr_filter=FilterCrunchStderr,
+                          fail_func=DidCrunchFail)
+
+  # Check for images whose size increased during crunching and replace them
+  # with their originals (except for 9-patches, which must be crunched).
+  for dir_, _, files in os.walk(output_dir):
+    for crunched in files:
+      if crunched.endswith('.9.png'):
+        continue
+      if not crunched.endswith('.png'):
+        raise Exception('Unexpected file in crunched dir: ' + crunched)
+      crunched = os.path.join(dir_, crunched)
+      original = os.path.join(input_dir, os.path.relpath(crunched, output_dir))
+      original_size = os.path.getsize(original)
+      crunched_size = os.path.getsize(crunched)
+      if original_size < crunched_size:
+        shutil.copyfile(original, crunched)
+
+
+def FilterCrunchStderr(stderr):
+  """Filters out lines from aapt crunch's stderr that can safely be ignored."""
+  filtered_lines = []
+  for line in stderr.splitlines(True):
+    # Ignore this libpng warning, which is a known non-error condition.
+    # http://crbug.com/364355
+    if ('libpng warning: iCCP: Not recognizing known sRGB profile that has '
+        + 'been edited' in line):
+      continue
+    filtered_lines.append(line)
+  return ''.join(filtered_lines)
+
+
+def DidCrunchFail(returncode, stderr):
+  """Determines whether aapt crunch failed from its return code and output.
+
+  Because aapt's return code cannot be trusted, any output to stderr is
+  an indication that aapt has failed (http://crbug.com/314885).
+  """
+  return returncode != 0 or stderr
+
+
+def ZipResources(resource_dirs, zip_path):
+  # Python zipfile does not provide a way to replace a file (it just writes
+  # another file with the same name). So, first collect all the files to put
+  # in the zip (with proper overriding), and then zip them.
+  files_to_zip = dict()
+  for d in resource_dirs:
+    for root, _, files in os.walk(d):
+      for f in files:
+        archive_path = f
+        parent_dir = os.path.relpath(root, d)
+        if parent_dir != '.':
+          archive_path = os.path.join(parent_dir, f)
+        path = os.path.join(root, f)
+        files_to_zip[archive_path] = path
+  build_utils.DoZip(files_to_zip.iteritems(), zip_path)
+
+
+def CombineZips(zip_files, output_path):
+  # When packaging resources, if the top-level directories in the zip file are
+  # of the form 0, 1, ..., then each subdirectory will be passed to aapt as a
+  # resources directory. While some resources just clobber others (image files,
+  # etc), other resources (particularly .xml files) need to be more
+  # intelligently merged. That merging is left up to aapt.
+  def path_transform(name, src_zip):
+    return '%d/%s' % (zip_files.index(src_zip), name)
+
+  build_utils.MergeZips(output_path, zip_files, path_transform=path_transform)
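+  # For illustration: with zip_files = [a, b], an entry 'values/strings.xml'
+  # coming from b is stored as '1/values/strings.xml' in the output zip.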
+
+
+def _OnStaleMd5(options):
+  aapt = options.aapt_path
+  with build_utils.TempDir() as temp_dir:
+    deps_dir = os.path.join(temp_dir, 'deps')
+    build_utils.MakeDirectory(deps_dir)
+    v14_dir = os.path.join(temp_dir, 'v14')
+    build_utils.MakeDirectory(v14_dir)
+
+    gen_dir = os.path.join(temp_dir, 'gen')
+    build_utils.MakeDirectory(gen_dir)
+
+    input_resource_dirs = options.resource_dirs
+
+    if not options.v14_skip:
+      for resource_dir in input_resource_dirs:
+        generate_v14_compatible_resources.GenerateV14Resources(
+            resource_dir,
+            v14_dir)
+
+    dep_zips = options.dependencies_res_zips
+    dep_subdirs = []
+    for z in dep_zips:
+      subdir = os.path.join(deps_dir, os.path.basename(z))
+      if os.path.exists(subdir):
+        raise Exception('Resource zip name conflict: ' + os.path.basename(z))
+      build_utils.ExtractAll(z, path=subdir)
+      dep_subdirs.append(subdir)
+
+    # Generate R.java. This R.java contains non-final constants and is used only
+    # while compiling the library jar (e.g. chromium_content.jar). When building
+    # an apk, a new R.java file with the correct resource -> ID mappings will be
+    # generated by merging the resources from all libraries and the main apk
+    # project.
+    package_command = [aapt,
+                       'package',
+                       '-m',
+                       '-M', options.android_manifest,
+                       '--auto-add-overlay',
+                       '--no-version-vectors',
+                       '-I', options.android_sdk_jar,
+                       '--output-text-symbols', gen_dir,
+                       '-J', gen_dir,
+                       '--ignore-assets', build_utils.AAPT_IGNORE_PATTERN]
+
+    for d in input_resource_dirs:
+      package_command += ['-S', d]
+
+    for d in dep_subdirs:
+      package_command += ['-S', d]
+
+    if options.non_constant_id:
+      package_command.append('--non-constant-id')
+    if options.custom_package:
+      package_command += ['--custom-package', options.custom_package]
+    if options.proguard_file:
+      package_command += ['-G', options.proguard_file]
+    if options.shared_resources:
+      package_command.append('--shared-lib')
+    if options.app_as_shared_lib:
+      package_command.append('--app-as-shared-lib')
+    build_utils.CheckOutput(package_command, print_stderr=False)
+
+    if options.extra_res_packages:
+      CreateExtraRJavaFiles(
+          gen_dir,
+          options.extra_res_packages,
+          options.extra_r_text_files,
+          options.shared_resources or options.app_as_shared_lib,
+          options.include_all_resources)
+
+    # This is the list of directories with resources to put in the final .zip
+    # file. The order of these is important so that crunched/v14 resources
+    # override the normal ones.
+    zip_resource_dirs = input_resource_dirs + [v14_dir]
+
+    base_crunch_dir = os.path.join(temp_dir, 'crunch')
+
+    # Crunch image resources. This shrinks png files and is necessary for
+    # 9-patch images to display correctly. 'aapt crunch' accepts only a single
+    # directory at a time and deletes everything in the output directory.
+    for idx, input_dir in enumerate(input_resource_dirs):
+      crunch_dir = os.path.join(base_crunch_dir, str(idx))
+      build_utils.MakeDirectory(crunch_dir)
+      zip_resource_dirs.append(crunch_dir)
+      CrunchDirectory(aapt, input_dir, crunch_dir)
+
+    ZipResources(zip_resource_dirs, options.resource_zip_out)
+
+    if options.all_resources_zip_out:
+      CombineZips([options.resource_zip_out] + dep_zips,
+                  options.all_resources_zip_out)
+
+    if options.R_dir:
+      build_utils.DeleteDirectory(options.R_dir)
+      shutil.copytree(gen_dir, options.R_dir)
+    else:
+      build_utils.ZipDir(options.srcjar_out, gen_dir)
+
+    if options.r_text_out:
+      r_text_path = os.path.join(gen_dir, 'R.txt')
+      if os.path.exists(r_text_path):
+        shutil.copyfile(r_text_path, options.r_text_out)
+      else:
+        open(options.r_text_out, 'w').close()
+
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+  options = _ParseArgs(args)
+
+  possible_output_paths = [
+    options.resource_zip_out,
+    options.all_resources_zip_out,
+    options.proguard_file,
+    options.r_text_out,
+    options.srcjar_out,
+  ]
+  output_paths = [x for x in possible_output_paths if x]
+
+  # List python deps in input_strings rather than input_paths since their
+  # contents do not change what gets written to the depfile.
+  input_strings = options.extra_res_packages + [
+    options.app_as_shared_lib,
+    options.custom_package,
+    options.include_all_resources,
+    options.non_constant_id,
+    options.shared_resources,
+    options.v14_skip,
+  ]
+
+  input_paths = [
+    options.aapt_path,
+    options.android_manifest,
+    options.android_sdk_jar,
+  ]
+  input_paths.extend(options.dependencies_res_zips)
+  input_paths.extend(p for p in options.extra_r_text_files if os.path.exists(p))
+
+  resource_names = []
+  for resource_dir in options.resource_dirs:
+    for resource_file in build_utils.FindInDirectory(resource_dir, '*'):
+      input_paths.append(resource_file)
+      resource_names.append(os.path.relpath(resource_file, resource_dir))
+
+  # Resource filenames matter to the output, so add them to strings as well.
+  # This matters if a file is renamed but not changed (http://crbug.com/597126).
+  input_strings.extend(sorted(resource_names))
+
+  build_utils.CallAndWriteDepfileIfStale(
+      lambda: _OnStaleMd5(options),
+      options,
+      input_paths=input_paths,
+      input_strings=input_strings,
+      output_paths=output_paths,
+      # TODO(agrieve): Remove R_dir when it's no longer used (used only by GYP).
+      force=options.R_dir)
+
+
+if __name__ == '__main__':
+  main(sys.argv[1:])
diff --git a/build/android/gyp/proguard.py b/build/android/gyp/proguard.py
new file mode 100755
index 0000000..d019350
--- /dev/null
+++ b/build/android/gyp/proguard.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+import os
+import sys
+
+from util import build_utils
+from util import proguard_util
+
+
+def _ParseOptions(args):
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_option('--proguard-path',
+                    help='Path to the proguard executable.')
+  parser.add_option('--input-paths',
+                    help='Paths to the .jar files proguard should run on.')
+  parser.add_option('--output-path', help='Path to the generated .jar file.')
+  parser.add_option('--proguard-configs',
+                    help='Paths to proguard configuration files.')
+  parser.add_option('--mapping', help='Path to proguard mapping to apply.')
+  parser.add_option('--is-test', action='store_true',
+      help='If true, extra proguard options for instrumentation tests will be '
+      'added.')
+  parser.add_option('--tested-apk-info', help='Path to the proguard .info file '
+      'for the tested apk')
+  parser.add_option('--classpath', action='append',
+                    help='Classpath for proguard.')
+  parser.add_option('--stamp', help='Path to touch on success.')
+  parser.add_option('--verbose', '-v', action='store_true',
+                    help='Print all proguard output')
+
+  options, _ = parser.parse_args(args)
+
+  classpath = []
+  for arg in options.classpath:
+    classpath += build_utils.ParseGypList(arg)
+  options.classpath = classpath
+
+  return options
+
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+  options = _ParseOptions(args)
+
+  proguard = proguard_util.ProguardCmdBuilder(options.proguard_path)
+  proguard.injars(build_utils.ParseGypList(options.input_paths))
+  proguard.configs(build_utils.ParseGypList(options.proguard_configs))
+  proguard.outjar(options.output_path)
+
+  if options.mapping:
+    proguard.mapping(options.mapping)
+
+  if options.tested_apk_info:
+    proguard.tested_apk_info(options.tested_apk_info)
+
+  classpath = list(set(options.classpath))
+  proguard.libraryjars(classpath)
+  proguard.verbose(options.verbose)
+
+  input_paths = proguard.GetInputs()
+
+  build_utils.CallAndWriteDepfileIfStale(
+      proguard.CheckOutput,
+      options,
+      input_paths=input_paths,
+      input_strings=proguard.build(),
+      output_paths=[options.output_path])
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/push_libraries.py b/build/android/gyp/push_libraries.py
new file mode 100755
index 0000000..3dae6f0
--- /dev/null
+++ b/build/android/gyp/push_libraries.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Pushes native libraries to a device.
+
+"""
+
+import optparse
+import os
+import sys
+
+from util import build_device
+from util import build_utils
+from util import md5_check
+
+BUILD_ANDROID_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), os.pardir))
+sys.path.append(BUILD_ANDROID_DIR)
+
+import devil_chromium
+from pylib import constants
+
+def DoPush(options):
+  libraries = build_utils.ParseGypList(options.libraries)
+
+  device = build_device.GetBuildDeviceFromPath(
+      options.build_device_configuration)
+  if not device:
+    return
+
+  serial_number = device.GetSerialNumber()
+  # A list so that it is modifiable in Push below.
+  needs_directory = [True]
+  for lib in libraries:
+    device_path = os.path.join(options.device_dir, lib)
+    host_path = os.path.join(options.libraries_dir, lib)
+
+    def Push():
+      if needs_directory:
+        device.RunShellCommand('mkdir -p ' + options.device_dir)
+        needs_directory[:] = [] # = False
+      device.PushChangedFiles([(os.path.abspath(host_path), device_path)])
+
+    record_path = '%s.%s.push.md5.stamp' % (host_path, serial_number)
+    md5_check.CallAndRecordIfStale(
+        Push,
+        record_path=record_path,
+        input_paths=[host_path],
+        input_strings=[device_path])
+
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+  parser = optparse.OptionParser()
+  parser.add_option('--libraries-dir',
+      help='Directory that contains stripped libraries.')
+  parser.add_option('--device-dir',
+      help='Device directory to push the libraries to.')
+  parser.add_option('--libraries',
+      help='List of native libraries.')
+  parser.add_option('--stamp', help='Path to touch on success.')
+  parser.add_option('--build-device-configuration',
+      help='Path to build device configuration.')
+  parser.add_option('--output-directory',
+      help='The output directory.')
+  options, _ = parser.parse_args(args)
+
+  required_options = ['libraries', 'device_dir', 'libraries_dir']
+  build_utils.CheckOptions(options, parser, required=required_options)
+
+  devil_chromium.Initialize(
+      output_directory=os.path.abspath(options.output_directory))
+
+  DoPush(options)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/strip_library_for_device.py b/build/android/gyp/strip_library_for_device.py
new file mode 100755
index 0000000..9e2daae
--- /dev/null
+++ b/build/android/gyp/strip_library_for_device.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+import os
+import sys
+
+from util import build_utils
+
+
+def StripLibrary(android_strip, android_strip_args, library_path, output_path):
+  if build_utils.IsTimeStale(output_path, [library_path]):
+    strip_cmd = ([android_strip] +
+                 android_strip_args +
+                 ['-o', output_path, library_path])
+    build_utils.CheckOutput(strip_cmd)
+
+
+def main(args):
+  args = build_utils.ExpandFileArgs(args)
+
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_option('--android-strip',
+      help='Path to the toolchain\'s strip binary')
+  parser.add_option('--android-strip-arg', action='append',
+      help='Argument to be passed to strip')
+  parser.add_option('--libraries-dir',
+      help='Directory for un-stripped libraries')
+  parser.add_option('--stripped-libraries-dir',
+      help='Directory for stripped libraries')
+  parser.add_option('--libraries',
+      help='List of libraries to strip')
+  parser.add_option('--stamp', help='Path to touch on success')
+
+  options, _ = parser.parse_args(args)
+
+  libraries = build_utils.ParseGypList(options.libraries)
+
+  build_utils.MakeDirectory(options.stripped_libraries_dir)
+
+  for library in libraries:
+    for base_path in options.libraries_dir.split(','):
+      library_path = os.path.join(base_path, library)
+      if os.path.exists(library_path):
+        break
+    stripped_library_path = os.path.join(
+        options.stripped_libraries_dir, library)
+    StripLibrary(options.android_strip, options.android_strip_arg, library_path,
+        stripped_library_path)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/test/BUILD.gn b/build/android/gyp/test/BUILD.gn
new file mode 100644
index 0000000..2deac1d
--- /dev/null
+++ b/build/android/gyp/test/BUILD.gn
@@ -0,0 +1,13 @@
+import("//build/config/android/rules.gni")
+
+java_library("hello_world_java") {
+  java_files = [ "java/org/chromium/helloworld/HelloWorldPrinter.java" ]
+}
+
+java_binary("hello_world") {
+  deps = [
+    ":hello_world_java",
+  ]
+  java_files = [ "java/org/chromium/helloworld/HelloWorldMain.java" ]
+  main_class = "org.chromium.helloworld.HelloWorldMain"
+}
diff --git a/build/android/gyp/test/java/org/chromium/helloworld/HelloWorldMain.java b/build/android/gyp/test/java/org/chromium/helloworld/HelloWorldMain.java
new file mode 100644
index 0000000..10860d8
--- /dev/null
+++ b/build/android/gyp/test/java/org/chromium/helloworld/HelloWorldMain.java
@@ -0,0 +1,15 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.helloworld;
+
+public class HelloWorldMain {
+    public static void main(String[] args) {
+        if (args.length > 0) {
+            System.exit(Integer.parseInt(args[0]));
+        }
+        HelloWorldPrinter.print();
+    }
+}
+
diff --git a/build/android/gyp/test/java/org/chromium/helloworld/HelloWorldPrinter.java b/build/android/gyp/test/java/org/chromium/helloworld/HelloWorldPrinter.java
new file mode 100644
index 0000000..b09673e
--- /dev/null
+++ b/build/android/gyp/test/java/org/chromium/helloworld/HelloWorldPrinter.java
@@ -0,0 +1,12 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.helloworld;
+
+public class HelloWorldPrinter {
+    public static void print() {
+        System.out.println("Hello, world!");
+    }
+}
+
diff --git a/build/android/gyp/touch.py b/build/android/gyp/touch.py
new file mode 100755
index 0000000..7b4375e
--- /dev/null
+++ b/build/android/gyp/touch.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+from util import build_utils
+
+def main(argv):
+  for f in argv[1:]:
+    build_utils.Touch(f)
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/build/android/gyp/util/__init__.py b/build/android/gyp/util/__init__.py
new file mode 100644
index 0000000..727e987
--- /dev/null
+++ b/build/android/gyp/util/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
diff --git a/build/android/gyp/util/build_device.py b/build/android/gyp/util/build_device.py
new file mode 100644
index 0000000..83aa0d5
--- /dev/null
+++ b/build/android/gyp/util/build_device.py
@@ -0,0 +1,108 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" A simple device interface for build steps.
+
+"""
+
+import logging
+import os
+import re
+import sys
+
+from util import build_utils
+
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android.sdk import adb_wrapper
+
+
+def GetAttachedDevices():
+  return [a.GetDeviceSerial()
+          for a in adb_wrapper.AdbWrapper.Devices()]
+
+
+class BuildDevice(object):
+  def __init__(self, configuration):
+    self.id = configuration['id']
+    self.description = configuration['description']
+    self.install_metadata = configuration['install_metadata']
+    self.device = device_utils.DeviceUtils(self.id)
+
+  def RunShellCommand(self, *args, **kwargs):
+    return self.device.RunShellCommand(*args, **kwargs)
+
+  def PushChangedFiles(self, *args, **kwargs):
+    return self.device.PushChangedFiles(*args, **kwargs)
+
+  def GetSerialNumber(self):
+    return self.id
+
+  def Install(self, *args, **kwargs):
+    return self.device.Install(*args, **kwargs)
+
+  def InstallSplitApk(self, *args, **kwargs):
+    return self.device.InstallSplitApk(*args, **kwargs)
+
+  def GetInstallMetadata(self, apk_package):
+    """Gets the metadata on the device for the apk_package apk."""
+    # Matches lines like:
+    # -rw-r--r-- system   system    7376582 2013-04-19 16:34 \
+    #   org.chromium.chrome.apk
+    # -rw-r--r-- system   system    7376582 2013-04-19 16:34 \
+    #   org.chromium.chrome-1.apk
+    apk_matcher = lambda s: re.match(r'.*%s(-[0-9]*)?\.apk$' % apk_package, s)
+    matches = filter(apk_matcher, self.install_metadata)
+    return matches[0] if matches else None
+
+
+def GetConfigurationForDevice(device_id):
+  device = device_utils.DeviceUtils(device_id)
+  configuration = None
+  has_root = False
+  is_online = device.IsOnline()
+  if is_online:
+    cmd = 'ls -l /data/app; getprop ro.build.description'
+    cmd_output = device.RunShellCommand(cmd)
+    has_root = 'Permission denied' not in cmd_output[0]
+    if not has_root:
+      # Disable warning log messages from EnableRoot()
+      logging.getLogger().disabled = True
+      try:
+        device.EnableRoot()
+        has_root = True
+      except device_errors.CommandFailedError:
+        has_root = False
+      finally:
+        logging.getLogger().disabled = False
+      cmd_output = device.RunShellCommand(cmd)
+
+    configuration = {
+        'id': device_id,
+        'description': cmd_output[-1],
+        'install_metadata': cmd_output[:-1],
+    }
+  return configuration, is_online, has_root
+
+
+def WriteConfigurations(configurations, path):
+  # Currently we only support installing to the first device.
+  build_utils.WriteJson(configurations[:1], path, only_if_changed=True)
+
+
+def ReadConfigurations(path):
+  return build_utils.ReadJson(path)
+
+
+def GetBuildDevice(configurations):
+  assert len(configurations) == 1
+  return BuildDevice(configurations[0])
+
+
+def GetBuildDeviceFromPath(path):
+  configurations = ReadConfigurations(path)
+  if len(configurations) > 0:
+    return GetBuildDevice(configurations)
+  return None
+
diff --git a/build/android/gyp/util/build_utils.py b/build/android/gyp/util/build_utils.py
new file mode 100644
index 0000000..7c32bc1
--- /dev/null
+++ b/build/android/gyp/util/build_utils.py
@@ -0,0 +1,527 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import ast
+import contextlib
+import fnmatch
+import json
+import os
+import pipes
+import re
+import shlex
+import shutil
+import stat
+import subprocess
+import sys
+import tempfile
+import zipfile
+
+# Some clients do not add //build/android/gyp to PYTHONPATH.
+import md5_check  # pylint: disable=relative-import
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
+from pylib.constants import host_paths
+
+COLORAMA_ROOT = os.path.join(host_paths.DIR_SOURCE_ROOT,
+                             'third_party', 'colorama', 'src')
+# aapt should ignore OWNERS files in addition to the default ignore pattern.
+AAPT_IGNORE_PATTERN = ('!OWNERS:!.svn:!.git:!.ds_store:!*.scc:.*:<dir>_*:' +
+                       '!CVS:!thumbs.db:!picasa.ini:!*~:!*.d.stamp')
+_HERMETIC_TIMESTAMP = (2001, 1, 1, 0, 0, 0)
+_HERMETIC_FILE_ATTR = (0644 << 16L)
+
+
+@contextlib.contextmanager
+def TempDir():
+  dirname = tempfile.mkdtemp()
+  try:
+    yield dirname
+  finally:
+    shutil.rmtree(dirname)
+
+
+def MakeDirectory(dir_path):
+  try:
+    os.makedirs(dir_path)
+  except OSError:
+    pass
+
+
+def DeleteDirectory(dir_path):
+  if os.path.exists(dir_path):
+    shutil.rmtree(dir_path)
+
+
+def Touch(path, fail_if_missing=False):
+  if fail_if_missing and not os.path.exists(path):
+    raise Exception(path + ' doesn\'t exist.')
+
+  MakeDirectory(os.path.dirname(path))
+  with open(path, 'a'):
+    os.utime(path, None)
+
+
+def FindInDirectory(directory, filename_filter):
+  files = []
+  for root, _dirnames, filenames in os.walk(directory):
+    matched_files = fnmatch.filter(filenames, filename_filter)
+    files.extend((os.path.join(root, f) for f in matched_files))
+  return files
+
+
+def FindInDirectories(directories, filename_filter):
+  all_files = []
+  for directory in directories:
+    all_files.extend(FindInDirectory(directory, filename_filter))
+  return all_files
+
+
+def ParseGnList(gn_string):
+  # TODO(brettw) bug 573132: This doesn't handle GN escaping properly, so any
+  # weird characters like $ or \ in the strings will be corrupted.
+  #
+  # The code should import build/gn_helpers.py and then do:
+  #   parser = gn_helpers.GNValueParser(gn_string)
+  #   return parser.ParseList()
+  # As of this writing, though, there is a CastShell build script that sends
+  # JSON through this function, and using correct GN parsing corrupts that.
+  #
+  # We need to be consistent about passing either JSON or GN lists through
+  # this function.
+  return ast.literal_eval(gn_string)
+
+
+def ParseGypList(gyp_string):
+  # The ninja generator doesn't support $ in strings, so use ## to
+  # represent $.
+  # TODO(cjhopman): Remove when
+  # https://code.google.com/p/gyp/issues/detail?id=327
+  # is addressed.
+  gyp_string = gyp_string.replace('##', '$')
+
+  if gyp_string.startswith('['):
+    return ParseGnList(gyp_string)
+  return shlex.split(gyp_string)
+
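+# Illustrative behavior of the two accepted input styles (a hedged,
+# comment-only sketch; the values are hypothetical):
+#   ParseGypList('["a.jar", "b.jar"]')  -> ['a.jar', 'b.jar']   (GN-style list)
+#   ParseGypList('a.jar b##{suffix}')   -> ['a.jar', 'b${suffix}']   (shlex)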
+
+def CheckOptions(options, parser, required=None):
+  if not required:
+    return
+  for option_name in required:
+    if getattr(options, option_name) is None:
+      parser.error('--%s is required' % option_name.replace('_', '-'))
+
+
+def WriteJson(obj, path, only_if_changed=False):
+  old_dump = None
+  if os.path.exists(path):
+    with open(path, 'r') as oldfile:
+      old_dump = oldfile.read()
+
+  new_dump = json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
+
+  if not only_if_changed or old_dump != new_dump:
+    with open(path, 'w') as outfile:
+      outfile.write(new_dump)
+
+
+def ReadJson(path):
+  with open(path, 'r') as jsonfile:
+    return json.load(jsonfile)
+
+
+class CalledProcessError(Exception):
+  """This exception is raised when the process run by CheckOutput
+  exits with a non-zero exit code."""
+
+  def __init__(self, cwd, args, output):
+    super(CalledProcessError, self).__init__()
+    self.cwd = cwd
+    self.args = args
+    self.output = output
+
+  def __str__(self):
+    # A user should be able to simply copy and paste the command that failed
+    # into their shell.
+    copyable_command = '( cd {}; {} )'.format(os.path.abspath(self.cwd),
+        ' '.join(map(pipes.quote, self.args)))
+    return 'Command failed: {}\n{}'.format(copyable_command, self.output)
+
+
+# This can be used much like subprocess.check_output(). When the command
+# fails, it raises a build_utils.CalledProcessError whose message highlights
+# the failing command and its output.
+def CheckOutput(args, cwd=None, env=None,
+                print_stdout=False, print_stderr=True,
+                stdout_filter=None,
+                stderr_filter=None,
+                fail_func=lambda returncode, stderr: returncode != 0):
+  if not cwd:
+    cwd = os.getcwd()
+
+  child = subprocess.Popen(args,
+      stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env)
+  stdout, stderr = child.communicate()
+
+  if stdout_filter is not None:
+    stdout = stdout_filter(stdout)
+
+  if stderr_filter is not None:
+    stderr = stderr_filter(stderr)
+
+  if fail_func(child.returncode, stderr):
+    raise CalledProcessError(cwd, args, stdout + stderr)
+
+  if print_stdout:
+    sys.stdout.write(stdout)
+  if print_stderr:
+    sys.stderr.write(stderr)
+
+  return stdout
+
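+# Hypothetical usage sketch, mirroring subprocess.check_output():
+#   output = CheckOutput(['readelf', '-d', 'libfoo.so'], print_stderr=False)
+# A non-zero exit raises build_utils.CalledProcessError, whose str() includes
+# a copy-pasteable '( cd ...; ... )' command line.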
+
+def GetModifiedTime(path):
+  # For a symlink, the modified time should be the greater of the link's
+  # modified time and the modified time of the target.
+  return max(os.lstat(path).st_mtime, os.stat(path).st_mtime)
+
+
+def IsTimeStale(output, inputs):
+  if not os.path.exists(output):
+    return True
+
+  output_time = GetModifiedTime(output)
+  for i in inputs:
+    if GetModifiedTime(i) > output_time:
+      return True
+  return False
+
+
+def IsDeviceReady():
+  device_state = CheckOutput(['adb', 'get-state'])
+  return device_state.strip() == 'device'
+
+
+def CheckZipPath(name):
+  if os.path.normpath(name) != name:
+    raise Exception('Non-canonical zip path: %s' % name)
+  if os.path.isabs(name):
+    raise Exception('Absolute zip path: %s' % name)
+
+
+def IsSymlink(zip_file, name):
+  zi = zip_file.getinfo(name)
+
+  # The two high-order bytes of ZipInfo.external_attr represent
+  # UNIX permissions and file type bits.
+  return stat.S_ISLNK(zi.external_attr >> 16L)
+
+
+def ExtractAll(zip_path, path=None, no_clobber=True, pattern=None,
+               predicate=None):
+  if path is None:
+    path = os.getcwd()
+  elif not os.path.exists(path):
+    MakeDirectory(path)
+
+  with zipfile.ZipFile(zip_path) as z:
+    for name in z.namelist():
+      if name.endswith('/'):
+        continue
+      if pattern is not None:
+        if not fnmatch.fnmatch(name, pattern):
+          continue
+      if predicate and not predicate(name):
+        continue
+      CheckZipPath(name)
+      if no_clobber:
+        output_path = os.path.join(path, name)
+        if os.path.exists(output_path):
+          raise Exception(
+              'Path already exists from zip: %s %s %s'
+              % (zip_path, name, output_path))
+      if IsSymlink(z, name):
+        dest = os.path.join(path, name)
+        MakeDirectory(os.path.dirname(dest))
+        os.symlink(z.read(name), dest)
+      else:
+        z.extract(name, path)
+
+
+def AddToZipHermetic(zip_file, zip_path, src_path=None, data=None,
+                     compress=None):
+  """Adds a file to the given ZipFile with a hard-coded modified time.
+
+  Args:
+    zip_file: ZipFile instance to add the file to.
+    zip_path: Destination path within the zip file.
+    src_path: Path of the source file. Mutually exclusive with |data|.
+    data: File data as a string.
+    compress: Whether to enable compression. Default is taken from the
+        ZipFile constructor.
+  """
+  assert (src_path is None) != (data is None), (
+      '|src_path| and |data| are mutually exclusive.')
+  CheckZipPath(zip_path)
+  zipinfo = zipfile.ZipInfo(filename=zip_path, date_time=_HERMETIC_TIMESTAMP)
+  zipinfo.external_attr = _HERMETIC_FILE_ATTR
+
+  if src_path and os.path.islink(src_path):
+    zipinfo.filename = zip_path
+    zipinfo.external_attr |= stat.S_IFLNK << 16L # mark as a symlink
+    zip_file.writestr(zipinfo, os.readlink(src_path))
+    return
+
+  if src_path:
+    with open(src_path, 'rb') as f:
+      data = f.read()
+
+  # zipfile will deflate even when it makes the file bigger. To avoid
+  # growing files, disable compression at an arbitrary cut off point.
+  if len(data) < 16:
+    compress = False
+
+  # Passing an explicit compress_type of None to writestr() would mean
+  # ZIP_STORED (the ZipInfo default) rather than the default passed to the
+  # ZipFile constructor, so resolve the effective compression type here.
+  compress_type = zip_file.compression
+  if compress is not None:
+    compress_type = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
+  zip_file.writestr(zipinfo, data, compress_type)
+
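+# A minimal usage sketch (paths hypothetical): every call stamps the entry
+# with _HERMETIC_TIMESTAMP, so output zips are byte-for-byte reproducible.
+#   with zipfile.ZipFile('out.zip', 'w') as z:
+#     AddToZipHermetic(z, 'assets/foo.txt', src_path='foo.txt')
+#     AddToZipHermetic(z, 'gen/bar.txt', data='generated', compress=True)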
+
+def DoZip(inputs, output, base_dir=None):
+  """Creates a zip file from a list of files.
+
+  Args:
+    inputs: A list of paths to zip, or a list of (zip_path, fs_path) tuples.
+    output: Destination .zip file.
+    base_dir: Prefix to strip from inputs.
+  """
+  input_tuples = []
+  for tup in inputs:
+    if isinstance(tup, basestring):
+      tup = (os.path.relpath(tup, base_dir), tup)
+    input_tuples.append(tup)
+
+  # Sort by zip path to ensure stable zip ordering.
+  input_tuples.sort(key=lambda tup: tup[0])
+  with zipfile.ZipFile(output, 'w') as outfile:
+    for zip_path, fs_path in input_tuples:
+      AddToZipHermetic(outfile, zip_path, src_path=fs_path)
+
+
+def ZipDir(output, base_dir):
+  """Creates a zip file from a directory."""
+  inputs = []
+  for root, _, files in os.walk(base_dir):
+    for f in files:
+      inputs.append(os.path.join(root, f))
+  DoZip(inputs, output, base_dir)
+
+
+def MatchesGlob(path, filters):
+  """Returns whether the given path matches any of the given glob patterns."""
+  return filters and any(fnmatch.fnmatch(path, f) for f in filters)
+
+
+def MergeZips(output, inputs, exclude_patterns=None, path_transform=None):
+  path_transform = path_transform or (lambda p, z: p)
+  added_names = set()
+
+  with zipfile.ZipFile(output, 'w') as out_zip:
+    for in_file in inputs:
+      with zipfile.ZipFile(in_file, 'r') as in_zip:
+        in_zip._expected_crc = None
+        for info in in_zip.infolist():
+          # Ignore directories.
+          if info.filename[-1] == '/':
+            continue
+          dst_name = path_transform(info.filename, in_file)
+          already_added = dst_name in added_names
+          if not already_added and not MatchesGlob(dst_name, exclude_patterns):
+            AddToZipHermetic(out_zip, dst_name, data=in_zip.read(info))
+            added_names.add(dst_name)
+
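+# Example sketch (hypothetical jars): merge two jars, where the first input
+# wins on duplicate entry names and META-INF metadata is filtered out:
+#   MergeZips('out.jar', ['a.jar', 'b.jar'], exclude_patterns=['META-INF/*'])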
+
+def PrintWarning(message):
+  print 'WARNING: ' + message
+
+
+def PrintBigWarning(message):
+  print '*****     ' * 8
+  PrintWarning(message)
+  print '*****     ' * 8
+
+
+def GetSortedTransitiveDependencies(top, deps_func):
+  """Gets the list of all transitive dependencies in sorted order.
+
+  There should be no cycles in the dependency graph.
+
+  Args:
+    top: a list of the top level nodes
+    deps_func: A function that takes a node and returns its direct dependencies.
+  Returns:
+    A list of all transitive dependencies of nodes in top, in order (a node will
+    appear in the list at a higher index than all of its dependencies).
+  """
+  def Node(dep):
+    return (dep, deps_func(dep))
+
+  # First: find all deps
+  unchecked_deps = list(top)
+  all_deps = set(top)
+  while unchecked_deps:
+    dep = unchecked_deps.pop()
+    new_deps = deps_func(dep).difference(all_deps)
+    unchecked_deps.extend(new_deps)
+    all_deps = all_deps.union(new_deps)
+
+  # Then: simple, slow topological sort.
+  sorted_deps = []
+  unsorted_deps = dict(map(Node, all_deps))
+  while unsorted_deps:
+    for library, dependencies in unsorted_deps.items():
+      if not dependencies.intersection(unsorted_deps.keys()):
+        sorted_deps.append(library)
+        del unsorted_deps[library]
+
+  return sorted_deps
+
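+# Worked example on a hypothetical graph: if deps are A -> {B, C} and
+# B -> {C}, then GetSortedTransitiveDependencies(['A'], deps_func) returns
+# ['C', 'B', 'A'], i.e. every node appears after all of its dependencies.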
+
+def GetPythonDependencies():
+  """Gets the paths of imported non-system python modules.
+
+  A path is assumed to be a "system" import if it is outside of chromium's
+  src/. The paths will be relative to the current directory.
+  """
+  module_paths = (m.__file__ for m in sys.modules.itervalues()
+                  if m is not None and hasattr(m, '__file__'))
+
+  abs_module_paths = map(os.path.abspath, module_paths)
+
+  assert os.path.isabs(host_paths.DIR_SOURCE_ROOT)
+  non_system_module_paths = [
+      p for p in abs_module_paths if p.startswith(host_paths.DIR_SOURCE_ROOT)]
+  def ConvertPycToPy(s):
+    if s.endswith('.pyc'):
+      return s[:-1]
+    return s
+
+  non_system_module_paths = map(ConvertPycToPy, non_system_module_paths)
+  non_system_module_paths = map(os.path.relpath, non_system_module_paths)
+  return sorted(set(non_system_module_paths))
+
+
+def AddDepfileOption(parser):
+  # TODO(agrieve): Get rid of this once we've moved to argparse.
+  if hasattr(parser, 'add_option'):
+    func = parser.add_option
+  else:
+    func = parser.add_argument
+  func('--depfile',
+       help='Path to depfile. Must be specified as the action\'s first output.')
+
+
+def WriteDepfile(path, dependencies):
+  with open(path, 'w') as depfile:
+    depfile.write(path)
+    depfile.write(': ')
+    depfile.write(' '.join(dependencies))
+    depfile.write('\n')
+
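+# Produces a single Makefile-style dependency rule (the format ninja's
+# depfile support expects), e.g. with hypothetical paths:
+#   gen/foo.stamp: ../../build/a.py ../../build/b.py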
+
+def ExpandFileArgs(args):
+  """Replaces file-arg placeholders in args.
+
+  These placeholders have the form:
+    @FileArg(filename:key1:key2:...:keyn)
+
+  The value of such a placeholder is calculated by reading 'filename' as json.
+  And then extracting the value at [key1][key2]...[keyn].
+
+  Note: This intentionally does not return the list of files that appear in such
+  placeholders. An action that uses file-args *must* know the paths of those
+  files prior to the parsing of the arguments (typically by explicitly listing
+  them in the action's inputs in build files).
+  """
+  new_args = list(args)
+  file_jsons = dict()
+  r = re.compile('@FileArg\((.*?)\)')
+  for i, arg in enumerate(args):
+    match = r.search(arg)
+    if not match:
+      continue
+
+    if match.end() != len(arg):
+      raise Exception('Unexpected characters after FileArg: ' + arg)
+
+    lookup_path = match.group(1).split(':')
+    file_path = lookup_path[0]
+    if file_path not in file_jsons:
+      file_jsons[file_path] = ReadJson(file_path)
+
+    expansion = file_jsons[file_path]
+    for k in lookup_path[1:]:
+      expansion = expansion[k]
+
+    new_args[i] = arg[:match.start()] + str(expansion)
+
+  return new_args
+
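+# Hedged illustration with a hypothetical config file: if cfg.json contains
+# {"javac": {"classpath": ["a.jar", "b.jar"]}}, then
+#   ExpandFileArgs(['--cp=@FileArg(cfg.json:javac:classpath)'])
+# returns ["--cp=['a.jar', 'b.jar']"]; the looked-up value is str()-ified, so
+# list values round-trip through ParseGypList() on the receiving end.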
+
+def CallAndWriteDepfileIfStale(function, options, record_path=None,
+                               input_paths=None, input_strings=None,
+                               output_paths=None, force=False,
+                               pass_changes=False,
+                               depfile_deps=None):
+  """Wraps md5_check.CallAndRecordIfStale() and also writes dep & stamp files.
+
+  Depfiles and stamp files are automatically added to output_paths when present
+  in the |options| argument. They are then created after |function| is called.
+
+  By default, only python dependencies are added to the depfile. If there are
+  other input paths that are not captured by GN deps, then they should be listed
+  in depfile_deps. It's important to write paths to the depfile that are already
+  captured by GN deps since GN args can cause GN deps to change, and such
+  changes are not immediately reflected in depfiles (http://crbug.com/589311).
+  """
+  if not output_paths:
+    raise Exception('At least one output_path must be specified.')
+  input_paths = list(input_paths or [])
+  input_strings = list(input_strings or [])
+  output_paths = list(output_paths or [])
+
+  python_deps = None
+  if hasattr(options, 'depfile') and options.depfile:
+    python_deps = GetPythonDependencies()
+    # List python deps in input_strings rather than input_paths since the
+    # contents of them does not change what gets written to the depfile.
+    input_strings += python_deps
+    output_paths += [options.depfile]
+
+  stamp_file = hasattr(options, 'stamp') and options.stamp
+  if stamp_file:
+    output_paths += [stamp_file]
+
+  def on_stale_md5(changes):
+    args = (changes,) if pass_changes else ()
+    function(*args)
+    if python_deps is not None:
+      all_depfile_deps = list(python_deps)
+      if depfile_deps:
+        all_depfile_deps.extend(depfile_deps)
+      WriteDepfile(options.depfile, all_depfile_deps)
+    if stamp_file:
+      Touch(stamp_file)
+
+  md5_check.CallAndRecordIfStale(
+      on_stale_md5,
+      record_path=record_path,
+      input_paths=input_paths,
+      input_strings=input_strings,
+      output_paths=output_paths,
+      force=force,
+      pass_changes=True)
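+
+# Minimal usage sketch (option names hypothetical): an action that copies a
+# file and reruns only when its input, its own Python sources, or its flags
+# change:
+#   CallAndWriteDepfileIfStale(
+#       lambda: shutil.copyfile(options.input, options.output),
+#       options,
+#       input_paths=[options.input],
+#       output_paths=[options.output])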
+
diff --git a/build/android/gyp/util/md5_check.py b/build/android/gyp/util/md5_check.py
new file mode 100644
index 0000000..7dac2e4
--- /dev/null
+++ b/build/android/gyp/util/md5_check.py
@@ -0,0 +1,402 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import difflib
+import hashlib
+import itertools
+import json
+import os
+import sys
+import zipfile
+
+
+# When set and a difference is detected, a diff of what changed is printed.
+PRINT_EXPLANATIONS = int(os.environ.get('PRINT_BUILD_EXPLANATIONS', 0))
+
+# An escape hatch that causes all targets to be rebuilt.
+_FORCE_REBUILD = int(os.environ.get('FORCE_REBUILD', 0))
+
+
+def CallAndRecordIfStale(
+    function, record_path=None, input_paths=None, input_strings=None,
+    output_paths=None, force=False, pass_changes=False):
+  """Calls function if outputs are stale.
+
+  Outputs are considered stale if:
+  - any output_paths are missing, or
+  - the contents of any file within input_paths has changed, or
+  - the contents of input_strings has changed.
+
+  To debug which files are out-of-date, set the environment variable:
+      PRINT_BUILD_EXPLANATIONS=1
+
+  Args:
+    function: The function to call.
+    record_path: Path to record metadata.
+      Defaults to output_paths[0] + '.md5.stamp'
+    input_paths: List of paths to calculate an md5 sum on.
+    input_strings: List of strings to record verbatim.
+    output_paths: List of output paths.
+    force: Whether to treat outputs as missing regardless of whether they
+      actually are.
+    pass_changes: Whether to pass a Changes instance to |function|.
+  """
+  assert record_path or output_paths
+  input_paths = input_paths or []
+  input_strings = input_strings or []
+  output_paths = output_paths or []
+  record_path = record_path or output_paths[0] + '.md5.stamp'
+
+  assert record_path.endswith('.stamp'), (
+      'record paths must end in \'.stamp\' so that they are easy to find '
+      'and delete')
+
+  new_metadata = _Metadata()
+  new_metadata.AddStrings(input_strings)
+
+  for path in input_paths:
+    if _IsZipFile(path):
+      entries = _ExtractZipEntries(path)
+      new_metadata.AddZipFile(path, entries)
+    else:
+      new_metadata.AddFile(path, _Md5ForPath(path))
+
+  old_metadata = None
+  force = force or _FORCE_REBUILD
+  missing_outputs = [x for x in output_paths if force or not os.path.exists(x)]
+  # When outputs are missing, don't bother gathering change information.
+  if not missing_outputs and os.path.exists(record_path):
+    with open(record_path, 'r') as jsonfile:
+      try:
+        old_metadata = _Metadata.FromFile(jsonfile)
+      except:  # pylint: disable=bare-except
+        pass  # Not yet using new file format.
+
+  changes = Changes(old_metadata, new_metadata, force, missing_outputs)
+  if not changes.HasChanges():
+    return
+
+  if PRINT_EXPLANATIONS:
+    print '=' * 80
+    print 'Target is stale: %s' % record_path
+    print changes.DescribeDifference()
+    print '=' * 80
+
+  args = (changes,) if pass_changes else ()
+  function(*args)
+
+  with open(record_path, 'w') as f:
+    new_metadata.ToFile(f)
+
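+# Hypothetical example: rebuild out.txt only when in.txt or the flag string
+# changes (the .md5.stamp record is placed next to the first output):
+#   CallAndRecordIfStale(Compile, input_paths=['in.txt'],
+#                        input_strings=['--flag'], output_paths=['out.txt'])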
+
+class Changes(object):
+  """Provides and API for querying what changed between runs."""
+
+  def __init__(self, old_metadata, new_metadata, force, missing_outputs):
+    self.old_metadata = old_metadata
+    self.new_metadata = new_metadata
+    self.force = force
+    self.missing_outputs = missing_outputs
+
+  def _GetOldTag(self, path, subpath=None):
+    return self.old_metadata and self.old_metadata.GetTag(path, subpath)
+
+  def HasChanges(self):
+    """Returns whether any changes exist."""
+    return (self.force or
+            not self.old_metadata or
+            self.old_metadata.StringsMd5() != self.new_metadata.StringsMd5() or
+            self.old_metadata.FilesMd5() != self.new_metadata.FilesMd5())
+
+  def AddedOrModifiedOnly(self):
+    """Returns whether the only changes were from added or modified (sub)files.
+
+    No missing outputs, no removed paths/subpaths.
+    """
+    if (self.force or
+        not self.old_metadata or
+        self.old_metadata.StringsMd5() != self.new_metadata.StringsMd5()):
+      return False
+    if any(self.IterRemovedPaths()):
+      return False
+    for path in self.IterModifiedPaths():
+      if any(self.IterRemovedSubpaths(path)):
+        return False
+    return True
+
+  def IterAddedPaths(self):
+    """Generator for paths that were added."""
+    for path in self.new_metadata.IterPaths():
+      if self._GetOldTag(path) is None:
+        yield path
+
+  def IterAddedSubpaths(self, path):
+    """Generator for paths that were added within the given zip file."""
+    for subpath in self.new_metadata.IterSubpaths(path):
+      if self._GetOldTag(path, subpath) is None:
+        yield subpath
+
+  def IterRemovedPaths(self):
+    """Generator for paths that were removed."""
+    if self.old_metadata:
+      for path in self.old_metadata.IterPaths():
+        if self.new_metadata.GetTag(path) is None:
+          yield path
+
+  def IterRemovedSubpaths(self, path):
+    """Generator for paths that were removed within the given zip file."""
+    if self.old_metadata:
+      for subpath in self.old_metadata.IterSubpaths(path):
+        if self.new_metadata.GetTag(path, subpath) is None:
+          yield subpath
+
+  def IterModifiedPaths(self):
+    """Generator for paths whose contents have changed."""
+    for path in self.new_metadata.IterPaths():
+      old_tag = self._GetOldTag(path)
+      new_tag = self.new_metadata.GetTag(path)
+      if old_tag is not None and old_tag != new_tag:
+        yield path
+
+  def IterModifiedSubpaths(self, path):
+    """Generator for paths within a zip file whose contents have changed."""
+    for subpath in self.new_metadata.IterSubpaths(path):
+      old_tag = self._GetOldTag(path, subpath)
+      new_tag = self.new_metadata.GetTag(path, subpath)
+      if old_tag is not None and old_tag != new_tag:
+        yield subpath
+
+  def IterChangedPaths(self):
+    """Generator for all changed paths (added/removed/modified)."""
+    return itertools.chain(self.IterRemovedPaths(),
+                           self.IterModifiedPaths(),
+                           self.IterAddedPaths())
+
+  def IterChangedSubpaths(self, path):
+    """Generator for paths within a zip that were added/removed/modified."""
+    return itertools.chain(self.IterRemovedSubpaths(path),
+                           self.IterModifiedSubpaths(path),
+                           self.IterAddedSubpaths(path))
+
+  def DescribeDifference(self):
+    """Returns a human-readable description of what changed."""
+    if self.force:
+      return 'force=True'
+    elif self.missing_outputs:
+      return 'Outputs do not exist:\n  ' + '\n  '.join(self.missing_outputs)
+    elif self.old_metadata is None:
+      return 'Previous stamp file not found.'
+
+    if self.old_metadata.StringsMd5() != self.new_metadata.StringsMd5():
+      ndiff = difflib.ndiff(self.old_metadata.GetStrings(),
+                            self.new_metadata.GetStrings())
+      changed = [s for s in ndiff if not s.startswith(' ')]
+      return 'Input strings changed:\n  ' + '\n  '.join(changed)
+
+    if self.old_metadata.FilesMd5() == self.new_metadata.FilesMd5():
+      return "There's no difference."
+
+    lines = []
+    lines.extend('Added: ' + p for p in self.IterAddedPaths())
+    lines.extend('Removed: ' + p for p in self.IterRemovedPaths())
+    for path in self.IterModifiedPaths():
+      lines.append('Modified: ' + path)
+      lines.extend('  -> Subpath added: ' + p
+                   for p in self.IterAddedSubpaths(path))
+      lines.extend('  -> Subpath removed: ' + p
+                   for p in self.IterRemovedSubpaths(path))
+      lines.extend('  -> Subpath modified: ' + p
+                   for p in self.IterModifiedSubpaths(path))
+    if lines:
+      return 'Input files changed:\n  ' + '\n  '.join(lines)
+    return 'I have no idea what changed (there is a bug).'
+
+
+class _Metadata(object):
+  """Data model for tracking change metadata."""
+  # Schema:
+  # {
+  #   "files-md5": "VALUE",
+  #   "strings-md5": "VALUE",
+  #   "input-files": [
+  #     {
+  #       "path": "path.jar",
+  #       "tag": "{MD5 of entries}",
+  #       "entries": [
+  #         { "path": "org/chromium/base/Foo.class", "tag": "{CRC32}" }, ...
+  #       ]
+  #     }, {
+  #       "path": "path.txt",
+  #       "tag": "{MD5}",
+  #     }
+  #   ],
+  #   "input-strings": ["a", "b", ...],
+  # }
+  def __init__(self):
+    self._files_md5 = None
+    self._strings_md5 = None
+    self._files = []
+    self._strings = []
+    # Map of (path, subpath) -> entry. Created upon first call to _GetEntry().
+    self._file_map = None
+
+  @classmethod
+  def FromFile(cls, fileobj):
+    """Returns a _Metadata initialized from a file object."""
+    ret = cls()
+    obj = json.load(fileobj)
+    ret._files_md5 = obj['files-md5']
+    ret._strings_md5 = obj['strings-md5']
+    ret._files = obj['input-files']
+    ret._strings = obj['input-strings']
+    return ret
+
+  def ToFile(self, fileobj):
+    """Serializes metadata to the given file object."""
+    obj = {
+        "files-md5": self.FilesMd5(),
+        "strings-md5": self.StringsMd5(),
+        "input-files": self._files,
+        "input-strings": self._strings,
+    }
+    json.dump(obj, fileobj, indent=2)
+
+  def _AssertNotQueried(self):
+    assert self._files_md5 is None
+    assert self._strings_md5 is None
+    assert self._file_map is None
+
+  def AddStrings(self, values):
+    self._AssertNotQueried()
+    self._strings.extend(str(v) for v in values)
+
+  def AddFile(self, path, tag):
+    """Adds metadata for a non-zip file.
+
+    Args:
+      path: Path to the file.
+      tag: A short string representative of the file contents.
+    """
+    self._AssertNotQueried()
+    self._files.append({
+        'path': path,
+        'tag': tag,
+    })
+
+  def AddZipFile(self, path, entries):
+    """Adds metadata for a zip file.
+
+    Args:
+      path: Path to the file.
+      entries: List of (subpath, tag) tuples for entries within the zip.
+    """
+    self._AssertNotQueried()
+    tag = _ComputeInlineMd5(itertools.chain((e[0] for e in entries),
+                                            (e[1] for e in entries)))
+    self._files.append({
+        'path': path,
+        'tag': tag,
+        'entries': [{"path": e[0], "tag": e[1]} for e in entries],
+    })
+
+  def GetStrings(self):
+    """Returns the list of input strings."""
+    return self._strings
+
+  def FilesMd5(self):
+    """Lazily computes and returns the aggregate md5 of input files."""
+    if self._files_md5 is None:
+      # Omit paths from md5 since temporary files have random names.
+      self._files_md5 = _ComputeInlineMd5(
+          self.GetTag(p) for p in sorted(self.IterPaths()))
+    return self._files_md5
+
+  def StringsMd5(self):
+    """Lazily computes and returns the aggregate md5 of input strings."""
+    if self._strings_md5 is None:
+      self._strings_md5 = _ComputeInlineMd5(self._strings)
+    return self._strings_md5
+
+  def _GetEntry(self, path, subpath=None):
+    """Returns the JSON entry for the given path / subpath."""
+    if self._file_map is None:
+      self._file_map = {}
+      for entry in self._files:
+        self._file_map[(entry['path'], None)] = entry
+        for subentry in entry.get('entries', ()):
+          self._file_map[(entry['path'], subentry['path'])] = subentry
+    return self._file_map.get((path, subpath))
+
+  def GetTag(self, path, subpath=None):
+    """Returns the tag for the given path / subpath."""
+    ret = self._GetEntry(path, subpath)
+    return ret and ret['tag']
+
+  def IterPaths(self):
+    """Returns a generator for all top-level paths."""
+    return (e['path'] for e in self._files)
+
+  def IterSubpaths(self, path):
+    """Returns a generator for all subpaths in the given zip.
+
+    If the given path is not a zip file or doesn't exist, returns an empty
+    iterable.
+    """
+    outer_entry = self._GetEntry(path)
+    if not outer_entry:
+      return ()
+    subentries = outer_entry.get('entries', [])
+    return (entry['path'] for entry in subentries)
+
+
+def _UpdateMd5ForFile(md5, path, block_size=2**16):
+  with open(path, 'rb') as infile:
+    while True:
+      data = infile.read(block_size)
+      if not data:
+        break
+      md5.update(data)
+
+
+def _UpdateMd5ForDirectory(md5, dir_path):
+  for root, _, files in os.walk(dir_path):
+    for f in files:
+      _UpdateMd5ForFile(md5, os.path.join(root, f))
+
+
+def _Md5ForPath(path):
+  md5 = hashlib.md5()
+  if os.path.isdir(path):
+    _UpdateMd5ForDirectory(md5, path)
+  else:
+    _UpdateMd5ForFile(md5, path)
+  return md5.hexdigest()
+
+
+def _ComputeInlineMd5(iterable):
+  """Computes the md5 of the concatenated parameters."""
+  md5 = hashlib.md5()
+  for item in iterable:
+    md5.update(str(item))
+  return md5.hexdigest()
+
+
+def _IsZipFile(path):
+  """Returns whether to treat the given file as a zip file."""
+  # ijar doesn't set the CRC32 field.
+  if path.endswith('.interface.jar'):
+    return False
+  return path[-4:] in ('.zip', '.apk', '.jar') or path.endswith('.srcjar')
+
+
+def _ExtractZipEntries(path):
+  """Returns a list of (path, CRC32) of all files within |path|."""
+  entries = []
+  with zipfile.ZipFile(path) as zip_file:
+    for zip_info in zip_file.infolist():
+      # Skip directories and empty files.
+      if zip_info.CRC:
+        entries.append(
+            (zip_info.filename, zip_info.CRC + zip_info.compress_type))
+  return entries
diff --git a/build/android/gyp/util/md5_check_test.py b/build/android/gyp/util/md5_check_test.py
new file mode 100755
index 0000000..312d4a9
--- /dev/null
+++ b/build/android/gyp/util/md5_check_test.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import fnmatch
+import tempfile
+import unittest
+import zipfile
+
+import md5_check # pylint: disable=W0403
+
+
+def _WriteZipFile(path, entries):
+  with zipfile.ZipFile(path, 'w') as zip_file:
+    for subpath, data in entries:
+      zip_file.writestr(subpath, data)
+
+
+class TestMd5Check(unittest.TestCase):
+  def setUp(self):
+    self.called = False
+    self.changes = None
+
+  def testCallAndRecordIfStale(self):
+    input_strings = ['string1', 'string2']
+    input_file1 = tempfile.NamedTemporaryFile(suffix='.txt')
+    input_file2 = tempfile.NamedTemporaryFile(suffix='.zip')
+    file1_contents = 'input file 1'
+    input_file1.write(file1_contents)
+    input_file1.flush()
+    # Test out empty zip file to start.
+    _WriteZipFile(input_file2.name, [])
+    input_files = [input_file1.name, input_file2.name]
+
+    record_path = tempfile.NamedTemporaryFile(suffix='.stamp')
+
+    def CheckCallAndRecord(should_call, message, force=False,
+                           outputs_specified=False, outputs_missing=False,
+                           expected_changes=None, added_or_modified_only=None):
+      output_paths = None
+      if outputs_specified:
+        output_file1 = tempfile.NamedTemporaryFile()
+        if outputs_missing:
+          output_file1.close()  # Gets deleted on close().
+        output_paths = [output_file1.name]
+
+      self.called = False
+      self.changes = None
+      if expected_changes or added_or_modified_only is not None:
+        def MarkCalled(changes):
+          self.called = True
+          self.changes = changes
+      else:
+        def MarkCalled():
+          self.called = True
+
+      md5_check.CallAndRecordIfStale(
+          MarkCalled,
+          record_path=record_path.name,
+          input_paths=input_files,
+          input_strings=input_strings,
+          output_paths=output_paths,
+          force=force,
+          pass_changes=(expected_changes or added_or_modified_only) is not None)
+      self.assertEqual(should_call, self.called, message)
+      if expected_changes:
+        description = self.changes.DescribeDifference()
+        self.assertTrue(fnmatch.fnmatch(description, expected_changes),
+                        'Expected %s to match %s' % (
+                        repr(description), repr(expected_changes)))
+      if should_call and added_or_modified_only is not None:
+        self.assertEqual(added_or_modified_only,
+                         self.changes.AddedOrModifiedOnly())
+
+    CheckCallAndRecord(True, 'should call when record doesn\'t exist',
+                       expected_changes='Previous stamp file not found.',
+                       added_or_modified_only=False)
+    CheckCallAndRecord(False, 'should not call when nothing changed')
+    CheckCallAndRecord(False, 'should not call when nothing changed #2',
+                       outputs_specified=True, outputs_missing=False)
+    CheckCallAndRecord(True, 'should call when output missing',
+                       outputs_specified=True, outputs_missing=True,
+                       expected_changes='Outputs do not exist:*',
+                       added_or_modified_only=False)
+    CheckCallAndRecord(True, force=True, message='should call when forced',
+                       expected_changes='force=True',
+                       added_or_modified_only=False)
+
+    input_file1.write('some more input')
+    input_file1.flush()
+    CheckCallAndRecord(True, 'changed input file should trigger call',
+                       expected_changes='*Modified: %s' % input_file1.name,
+                       added_or_modified_only=True)
+
+    input_files = input_files[::-1]
+    CheckCallAndRecord(False, 'reordering of inputs shouldn\'t trigger call')
+
+    input_files = input_files[:1]
+    CheckCallAndRecord(True, 'removing file should trigger call',
+                       expected_changes='*Removed: %s' % input_file1.name,
+                       added_or_modified_only=False)
+
+    input_files.append(input_file1.name)
+    CheckCallAndRecord(True, 'added input file should trigger call',
+                       expected_changes='*Added: %s' % input_file1.name,
+                       added_or_modified_only=True)
+
+    input_strings[0] = input_strings[0] + ' a bit longer'
+    CheckCallAndRecord(True, 'changed input string should trigger call',
+                       expected_changes='*Input strings changed*',
+                       added_or_modified_only=False)
+
+    input_strings = input_strings[::-1]
+    CheckCallAndRecord(True, 'reordering of string inputs should trigger call',
+                       expected_changes='*Input strings changed*')
+
+    input_strings = input_strings[:1]
+    CheckCallAndRecord(True, 'removing a string should trigger call')
+
+    input_strings.append('a brand new string')
+    CheckCallAndRecord(True, 'added input string should trigger call')
+
+    _WriteZipFile(input_file2.name, [('path/1.txt', '1')])
+    CheckCallAndRecord(True, 'added subpath should trigger call',
+                       expected_changes='*Modified: %s*Subpath added: %s' % (
+                                        input_file2.name, 'path/1.txt'),
+                       added_or_modified_only=True)
+    _WriteZipFile(input_file2.name, [('path/1.txt', '2')])
+    CheckCallAndRecord(True, 'changed subpath should trigger call',
+                       expected_changes='*Modified: %s*Subpath modified: %s' % (
+                                        input_file2.name, 'path/1.txt'),
+                       added_or_modified_only=True)
+    CheckCallAndRecord(False, 'should not call when nothing changed')
+
+    _WriteZipFile(input_file2.name, [])
+    CheckCallAndRecord(True, 'removed subpath should trigger call',
+                       expected_changes='*Modified: %s*Subpath removed: %s' % (
+                                        input_file2.name, 'path/1.txt'),
+                       added_or_modified_only=False)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/build/android/gyp/util/proguard_util.py b/build/android/gyp/util/proguard_util.py
new file mode 100644
index 0000000..f315979
--- /dev/null
+++ b/build/android/gyp/util/proguard_util.py
@@ -0,0 +1,189 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+from util import build_utils
+
+
+class _ProguardOutputFilter(object):
+  """ProGuard outputs boring stuff to stdout (proguard version, jar path, etc)
+  as well as interesting stuff (notes, warnings, etc). If stdout is entirely
+  boring, this class suppresses the output.
+  """
+
+  IGNORE_RE = re.compile(
+      r'(?:Pro.*version|Note:|Reading|Preparing|.*:.*(?:MANIFEST\.MF|\.empty))')
+
+  def __init__(self):
+    self._last_line_ignored = False
+
+  def __call__(self, output):
+    ret = []
+    for line in output.splitlines(True):
+      if not line.startswith(' '):
+        self._last_line_ignored = bool(self.IGNORE_RE.match(line))
+      elif 'You should check if you need to specify' in line:
+        self._last_line_ignored = True
+
+      if not self._last_line_ignored:
+        ret.append(line)
+    return ''.join(ret)
+
+
+class ProguardCmdBuilder(object):
+  def __init__(self, proguard_jar):
+    assert os.path.exists(proguard_jar)
+    self._proguard_jar_path = proguard_jar
+    self._tested_apk_info_path = None
+    self._tested_apk_info = None
+    self._mapping = None
+    self._libraries = None
+    self._injars = None
+    self._configs = None
+    self._outjar = None
+    self._cmd = None
+    self._verbose = False
+
+  def outjar(self, path):
+    assert self._cmd is None
+    assert self._outjar is None
+    self._outjar = path
+
+  def tested_apk_info(self, tested_apk_info_path):
+    assert self._cmd is None
+    assert self._tested_apk_info_path is None
+    self._tested_apk_info_path = tested_apk_info_path
+
+  def mapping(self, path):
+    assert self._cmd is None
+    assert self._mapping is None
+    assert os.path.exists(path), path
+    self._mapping = path
+
+  def libraryjars(self, paths):
+    assert self._cmd is None
+    assert self._libraries is None
+    for p in paths:
+      assert os.path.exists(p), p
+    self._libraries = paths
+
+  def injars(self, paths):
+    assert self._cmd is None
+    assert self._injars is None
+    for p in paths:
+      assert os.path.exists(p), p
+    self._injars = paths
+
+  def configs(self, paths):
+    assert self._cmd is None
+    assert self._configs is None
+    for p in paths:
+      assert os.path.exists(p), p
+    self._configs = paths
+
+  def verbose(self, verbose):
+    assert self._cmd is None
+    self._verbose = verbose
+
+  def build(self):
+    if self._cmd:
+      return self._cmd
+    assert self._injars is not None
+    assert self._outjar is not None
+    assert self._configs is not None
+    cmd = [
+      'java', '-jar', self._proguard_jar_path,
+      '-forceprocessing',
+    ]
+    if self._tested_apk_info_path:
+      assert len(self._configs) == 1
+      tested_apk_info = build_utils.ReadJson(self._tested_apk_info_path)
+      self._configs += tested_apk_info['configs']
+      self._injars = [
+          p for p in self._injars if p not in tested_apk_info['inputs']]
+      if not self._libraries:
+        self._libraries = []
+      self._libraries += tested_apk_info['inputs']
+      self._mapping = tested_apk_info['mapping']
+      cmd += [
+        '-dontobfuscate',
+        '-dontoptimize',
+        '-dontshrink',
+        '-dontskipnonpubliclibraryclassmembers',
+      ]
+
+    if self._mapping:
+      cmd += [
+        '-applymapping', self._mapping,
+      ]
+
+    if self._libraries:
+      cmd += [
+        '-libraryjars', ':'.join(self._libraries),
+      ]
+
+    cmd += [
+      '-injars', ':'.join(self._injars)
+    ]
+
+    for config_file in self._configs:
+      cmd += ['-include', config_file]
+
+    # The output jar must be specified after inputs.
+    cmd += [
+      '-outjars', self._outjar,
+      '-dump', self._outjar + '.dump',
+      '-printseeds', self._outjar + '.seeds',
+      '-printusage', self._outjar + '.usage',
+      '-printmapping', self._outjar + '.mapping',
+    ]
+
+    if self._verbose:
+      cmd.append('-verbose')
+
+    self._cmd = cmd
+    return self._cmd
+
+  def GetInputs(self):
+    self.build()
+    inputs = [self._proguard_jar_path] + self._configs + self._injars
+    if self._mapping:
+      inputs.append(self._mapping)
+    if self._libraries:
+      inputs += self._libraries
+    if self._tested_apk_info_path:
+      inputs += [self._tested_apk_info_path]
+    return inputs
+
+
+  def CheckOutput(self):
+    self.build()
+    # Proguard will skip writing these files if they would be empty. Create
+    # empty versions of them all now so that they are updated as the build
+    # expects.
+    open(self._outjar + '.dump', 'w').close()
+    open(self._outjar + '.seeds', 'w').close()
+    open(self._outjar + '.usage', 'w').close()
+    open(self._outjar + '.mapping', 'w').close()
+    # Warning: and Error: are sent to stderr, but messages and Note: are sent
+    # to stdout.
+    stdout_filter = None
+    stderr_filter = None
+    if not self._verbose:
+      stdout_filter = _ProguardOutputFilter()
+      stderr_filter = _ProguardOutputFilter()
+    build_utils.CheckOutput(self._cmd, print_stdout=True,
+                            print_stderr=True,
+                            stdout_filter=stdout_filter,
+                            stderr_filter=stderr_filter)
+
+    this_info = {
+      'inputs': self._injars,
+      'configs': self._configs,
+      'mapping': self._outjar + '.mapping',
+    }
+
+    build_utils.WriteJson(this_info, self._outjar + '.info')
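+
+# Typical builder-style usage (paths hypothetical); each setter may be called
+# at most once, and build() freezes the command:
+#   proguard = ProguardCmdBuilder('third_party/proguard.jar')
+#   proguard.injars(['obj/foo.jar'])
+#   proguard.configs(['proguard.flags'])
+#   proguard.outjar('obj/foo.proguard.jar')
+#   proguard.CheckOutput()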
+
diff --git a/build/android/gyp/write_build_config.py b/build/android/gyp/write_build_config.py
new file mode 100755
index 0000000..e0b727b
--- /dev/null
+++ b/build/android/gyp/write_build_config.py
@@ -0,0 +1,539 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Writes a build_config file.
+
+The build_config file for a target is a json file containing information about
+how to build that target based on the target's dependencies. This includes
+things like: the javac classpath, the list of android resources dependencies,
+etc. It also includes the information needed to create the build_config for
+other targets that depend on that one.
+
+Android build scripts should not refer to the build_config directly, and the
+build specification should instead pass information in using the special
+file-arg syntax (see build_utils.py:ExpandFileArgs). That syntax allows
+passing values from a json dict stored in a file, and looks like this:
+  --python-arg=@FileArg(build_config_path:javac:classpath)
+
+Note: If paths to input files are passed in this way, it is important that:
+  1. inputs/deps of the action ensure that the files are available the first
+  time the action runs.
+  2. Either (a) or (b)
+    a. inputs/deps ensure that the action runs whenever one of the files changes
+    b. the files are added to the action's depfile
+"""
+
+import itertools
+import optparse
+import os
+import sys
+import xml.dom.minidom
+
+from util import build_utils
+from util import md5_check
+
+import write_ordered_libraries
+
+
+# Types that should never be used as a dependency of another build config.
+_ROOT_TYPES = ('android_apk', 'deps_dex', 'java_binary', 'resource_rewriter')
+# Types that should not allow code deps to pass through.
+_RESOURCE_TYPES = ('android_assets', 'android_resources')
+
+
+class AndroidManifest(object):
+  def __init__(self, path):
+    self.path = path
+    dom = xml.dom.minidom.parse(path)
+    manifests = dom.getElementsByTagName('manifest')
+    assert len(manifests) == 1
+    self.manifest = manifests[0]
+
+  def GetInstrumentation(self):
+    instrumentation_els = self.manifest.getElementsByTagName('instrumentation')
+    if len(instrumentation_els) == 0:
+      return None
+    if len(instrumentation_els) != 1:
+      raise Exception(
+          'More than one <instrumentation> element found in %s' % self.path)
+    return instrumentation_els[0]
+
+  def CheckInstrumentation(self, expected_package):
+    instr = self.GetInstrumentation()
+    if not instr:
+      raise Exception('No <instrumentation> elements found in %s' % self.path)
+    instrumented_package = instr.getAttributeNS(
+        'http://schemas.android.com/apk/res/android', 'targetPackage')
+    if instrumented_package != expected_package:
+      raise Exception(
+          'Wrong instrumented package. Expected %s, got %s'
+          % (expected_package, instrumented_package))
+
+  def GetPackageName(self):
+    return self.manifest.getAttribute('package')
+
+
+dep_config_cache = {}
+def GetDepConfig(path):
+  if path not in dep_config_cache:
+    dep_config_cache[path] = build_utils.ReadJson(path)['deps_info']
+  return dep_config_cache[path]
+
+
+def DepsOfType(wanted_type, configs):
+  return [c for c in configs if c['type'] == wanted_type]
+
+
+def GetAllDepsConfigsInOrder(deps_config_paths):
+  def GetDeps(path):
+    return set(GetDepConfig(path)['deps_configs'])
+  return build_utils.GetSortedTransitiveDependencies(deps_config_paths, GetDeps)
+
+
+def ResolveGroups(configs):
+  while True:
+    groups = DepsOfType('group', configs)
+    if not groups:
+      return configs
+    for config in groups:
+      index = configs.index(config)
+      expanded_configs = [GetDepConfig(p) for p in config['deps_configs']]
+      configs[index:index + 1] = expanded_configs
+
+
+class Deps(object):
+  def __init__(self, direct_deps_config_paths):
+    self.all_deps_config_paths = GetAllDepsConfigsInOrder(
+        direct_deps_config_paths)
+    self.direct_deps_configs = ResolveGroups(
+        [GetDepConfig(p) for p in direct_deps_config_paths])
+    self.all_deps_configs = [
+        GetDepConfig(p) for p in self.all_deps_config_paths]
+    self.direct_deps_config_paths = direct_deps_config_paths
+
+  def All(self, wanted_type=None):
+    if wanted_type is None:
+      return self.all_deps_configs
+    return DepsOfType(wanted_type, self.all_deps_configs)
+
+  def Direct(self, wanted_type=None):
+    if wanted_type is None:
+      return self.direct_deps_configs
+    return DepsOfType(wanted_type, self.direct_deps_configs)
+
+  def AllConfigPaths(self):
+    return self.all_deps_config_paths
+
+  def RemoveNonDirectDep(self, path):
+    if path in self.direct_deps_config_paths:
+      raise Exception('Cannot remove direct dep.')
+    self.all_deps_config_paths.remove(path)
+    self.all_deps_configs.remove(GetDepConfig(path))
+
+def _MergeAssets(all_assets):
+  """Merges all assets from the given deps.
+
+  Returns:
+    A tuple of lists: (compressed, uncompressed)
+    Each tuple entry is a list of "srcPath:zipPath". srcPath is the path of the
+    asset to add, and zipPath is the location within the zip (excluding assets/
+    prefix)
+  """
+  compressed = {}
+  uncompressed = {}
+  for asset_dep in all_assets:
+    entry = asset_dep['assets']
+    disable_compression = entry.get('disable_compression', False)
+    dest_map = uncompressed if disable_compression else compressed
+    other_map = compressed if disable_compression else uncompressed
+    outputs = entry.get('outputs', [])
+    for src, dest in itertools.izip_longest(entry['sources'], outputs):
+      if not dest:
+        dest = os.path.basename(src)
+      # Merge so that each path shows up in only one of the lists, and that
+      # deps of the same target override previous ones.
+      other_map.pop(dest, 0)
+      dest_map[dest] = src
+
+  def create_list(asset_map):
+    ret = ['%s:%s' % (src, dest) for dest, src in asset_map.iteritems()]
+    # Sort to ensure deterministic ordering.
+    ret.sort()
+    return ret
+
+  return create_list(compressed), create_list(uncompressed)
+
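+# Hypothetical illustration: an uncompressed dep {'sources': ['res/a.png'],
+# 'disable_compression': True} plus a compressed dep {'sources': ['x/b.dat'],
+# 'outputs': ['c.dat']} merge to (['x/b.dat:c.dat'], ['res/a.png:a.png']).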
+
+def _FilterUnwantedDepsPaths(dep_paths, target_type):
+  # Don't allow root targets to be considered as a dep.
+  ret = [p for p in dep_paths if GetDepConfig(p)['type'] not in _ROOT_TYPES]
+
+  # Don't allow java libraries to cross through assets/resources.
+  if target_type in _RESOURCE_TYPES:
+    ret = [p for p in ret if GetDepConfig(p)['type'] in _RESOURCE_TYPES]
+  return ret
+
+
+def _AsInterfaceJar(jar_path):
+  return jar_path[:-3] + 'interface.jar'
+
+
+def main(argv):
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_option('--build-config', help='Path to build_config output.')
+  parser.add_option(
+      '--type',
+      help='Type of this target (e.g. android_library).')
+  parser.add_option(
+      '--possible-deps-configs',
+      help='List of paths for dependency\'s build_config files. Some '
+      'dependencies may not write build_config files. Missing build_config '
+      'files are handled differently based on the type of this target.')
+
+  # android_resources options
+  parser.add_option('--srcjar', help='Path to target\'s resources srcjar.')
+  parser.add_option('--resources-zip', help='Path to target\'s resources zip.')
+  parser.add_option('--r-text', help='Path to target\'s R.txt file.')
+  parser.add_option('--package-name',
+      help='Java package name for these resources.')
+  parser.add_option('--android-manifest', help='Path to android manifest.')
+  parser.add_option('--is-locale-resource', action='store_true',
+                    help='Whether this is a locale resource.')
+
+  # android_assets options
+  parser.add_option('--asset-sources', help='List of asset sources.')
+  parser.add_option('--asset-renaming-sources',
+                    help='List of asset sources with custom destinations.')
+  parser.add_option('--asset-renaming-destinations',
+                    help='List of asset custom destinations.')
+  parser.add_option('--disable-asset-compression', action='store_true',
+                    help='Whether to disable asset compression.')
+
+  # java library options
+  parser.add_option('--jar-path', help='Path to target\'s jar output.')
+  parser.add_option('--supports-android', action='store_true',
+      help='Whether this library supports running on the Android platform.')
+  parser.add_option('--requires-android', action='store_true',
+      help='Whether this library requires running on the Android platform.')
+  parser.add_option('--bypass-platform-checks', action='store_true',
+      help='Bypass checks for support/require Android platform.')
+
+  # android library options
+  parser.add_option('--dex-path', help='Path to target\'s dex output.')
+
+  # native library options
+  parser.add_option('--native-libs', help='List of top-level native libs.')
+  parser.add_option('--readelf-path', help='Path to toolchain\'s readelf.')
+
+  # apk options
+  parser.add_option('--apk-path', help='Path to the target\'s apk output.')
+  parser.add_option('--incremental-apk-path',
+                    help="Path to the target's incremental apk output.")
+  parser.add_option('--incremental-install-script-path',
+                    help="Path to the target's generated incremental install "
+                    "script.")
+
+  parser.add_option('--tested-apk-config',
+      help='Path to the build config of the tested apk (for an instrumentation '
+      'test apk).')
+  parser.add_option('--proguard-enabled', action='store_true',
+      help='Whether proguard is enabled for this apk.')
+  parser.add_option('--proguard-info',
+      help='Path to the proguard .info output for this apk.')
+  parser.add_option('--has-alternative-locale-resource', action='store_true',
+      help='Whether there is alternative-locale-resource in direct deps')
+
+  options, args = parser.parse_args(argv)
+
+  if args:
+    parser.error('No positional arguments should be given.')
+
+  required_options_map = {
+      'java_binary': ['build_config', 'jar_path'],
+      'java_library': ['build_config', 'jar_path'],
+      'android_assets': ['build_config'],
+      'android_resources': ['build_config', 'resources_zip'],
+      'android_apk': ['build_config', 'jar_path', 'dex_path', 'resources_zip'],
+      'deps_dex': ['build_config', 'dex_path'],
+      'resource_rewriter': ['build_config'],
+      'group': ['build_config'],
+  }
+  required_options = required_options_map.get(options.type)
+  if not required_options:
+    raise Exception('Unknown type: <%s>' % options.type)
+
+  if options.native_libs:
+    required_options.append('readelf_path')
+
+  build_utils.CheckOptions(options, parser, required_options)
+
+  if options.type == 'java_library':
+    if options.supports_android and not options.dex_path:
+      raise Exception('java_library that supports Android requires a dex path.')
+
+    if options.requires_android and not options.supports_android:
+      raise Exception(
+          '--supports-android is required when using --requires-android')
+
+  possible_deps_config_paths = build_utils.ParseGypList(
+      options.possible_deps_configs)
+
+  unknown_deps = [
+      c for c in possible_deps_config_paths if not os.path.exists(c)]
+
+  direct_deps_config_paths = [
+      c for c in possible_deps_config_paths if c not in unknown_deps]
+  direct_deps_config_paths = _FilterUnwantedDepsPaths(direct_deps_config_paths,
+                                                      options.type)
+
+  deps = Deps(direct_deps_config_paths)
+  all_inputs = deps.AllConfigPaths() + build_utils.GetPythonDependencies()
+
+  # Remove other locale resources if there is alternative_locale_resource in
+  # direct deps.
+  if options.has_alternative_locale_resource:
+    alternative = [r['path'] for r in deps.Direct('android_resources')
+                   if r.get('is_locale_resource')]
+    # Only one locale resource is allowed in direct deps.
+    if len(alternative) != 1:
+      raise Exception('Expected exactly one locale resource in direct deps, '
+                      'found %d' % len(alternative))
+    unwanted = [r['path'] for r in deps.All('android_resources')
+                if r.get('is_locale_resource') and r['path'] not in alternative]
+    for p in unwanted:
+      deps.RemoveNonDirectDep(p)
+
+  direct_library_deps = deps.Direct('java_library')
+  all_library_deps = deps.All('java_library')
+
+  direct_resources_deps = deps.Direct('android_resources')
+  all_resources_deps = deps.All('android_resources')
+  # Resources should be ordered with the highest-level dependency first so that
+  # overrides are done correctly.
+  all_resources_deps.reverse()
+
+  if options.type == 'android_apk' and options.tested_apk_config:
+    tested_apk_deps = Deps([options.tested_apk_config])
+    tested_apk_resources_deps = tested_apk_deps.All('android_resources')
+    all_resources_deps = [
+        d for d in all_resources_deps if d not in tested_apk_resources_deps]
+
+  # Initialize some common config.
+  config = {
+    'deps_info': {
+      'name': os.path.basename(options.build_config),
+      'path': options.build_config,
+      'type': options.type,
+      'deps_configs': direct_deps_config_paths
+    }
+  }
+  deps_info = config['deps_info']
+
+  if (options.type in ('java_binary', 'java_library') and
+      not options.bypass_platform_checks):
+    deps_info['requires_android'] = options.requires_android
+    deps_info['supports_android'] = options.supports_android
+
+    # Use names (rather than full config dicts) so error messages stay concise.
+    deps_require_android = (
+        [d['name'] for d in all_resources_deps] +
+        [d['name'] for d in all_library_deps if d['requires_android']])
+    deps_not_support_android = (
+        [d['name'] for d in all_library_deps if not d['supports_android']])
+
+    if deps_require_android and not options.requires_android:
+      raise Exception('Some deps require building for the Android platform: ' +
+          str(deps_require_android))
+
+    if deps_not_support_android and options.supports_android:
+      raise Exception('Not all deps support the Android platform: ' +
+          str(deps_not_support_android))
+
+  if options.type in ('java_binary', 'java_library', 'android_apk'):
+    javac_classpath = [c['jar_path'] for c in direct_library_deps]
+    java_full_classpath = [c['jar_path'] for c in all_library_deps]
+    deps_info['resources_deps'] = [c['path'] for c in all_resources_deps]
+    deps_info['jar_path'] = options.jar_path
+    if options.type == 'android_apk' or options.supports_android:
+      deps_info['dex_path'] = options.dex_path
+    if options.type == 'android_apk':
+      deps_info['apk_path'] = options.apk_path
+      deps_info['incremental_apk_path'] = options.incremental_apk_path
+      deps_info['incremental_install_script_path'] = (
+          options.incremental_install_script_path)
+
+    # Classpath values filled in below (after applying tested_apk_config).
+    config['javac'] = {}
+
+  if options.type in ('java_binary', 'java_library'):
+    # Only resources might have srcjars (normal srcjar targets are listed in
+    # srcjar_deps). A resource's srcjar contains the R.java file for those
+    # resources, and (like Android's default build system) we allow a library
+    # to refer to the resources in any of its dependencies.
+    config['javac']['srcjars'] = [
+        c['srcjar'] for c in direct_resources_deps if 'srcjar' in c]
+
+    # Used to strip out R.class for android_prebuilt()s.
+    if options.type == 'java_library':
+      config['javac']['resource_packages'] = [
+          c['package_name'] for c in all_resources_deps if 'package_name' in c]
+
+  if options.type == 'android_apk':
+    # Apks will get their resources srcjar explicitly passed to the java step.
+    config['javac']['srcjars'] = []
+
+  if options.type == 'android_assets':
+    all_asset_sources = []
+    if options.asset_renaming_sources:
+      all_asset_sources.extend(
+          build_utils.ParseGypList(options.asset_renaming_sources))
+    if options.asset_sources:
+      all_asset_sources.extend(build_utils.ParseGypList(options.asset_sources))
+
+    deps_info['assets'] = {
+        'sources': all_asset_sources
+    }
+    if options.asset_renaming_destinations:
+      deps_info['assets']['outputs'] = (
+          build_utils.ParseGypList(options.asset_renaming_destinations))
+    if options.disable_asset_compression:
+      deps_info['assets']['disable_compression'] = True
+
+  if options.type == 'android_resources':
+    deps_info['resources_zip'] = options.resources_zip
+    if options.srcjar:
+      deps_info['srcjar'] = options.srcjar
+    if options.android_manifest:
+      manifest = AndroidManifest(options.android_manifest)
+      deps_info['package_name'] = manifest.GetPackageName()
+    if options.package_name:
+      deps_info['package_name'] = options.package_name
+    if options.r_text:
+      deps_info['r_text'] = options.r_text
+    if options.is_locale_resource:
+      deps_info['is_locale_resource'] = True
+
+  if options.type in ('android_resources', 'android_apk', 'resource_rewriter'):
+    config['resources'] = {}
+    config['resources']['dependency_zips'] = [
+        c['resources_zip'] for c in all_resources_deps]
+    config['resources']['extra_package_names'] = []
+    config['resources']['extra_r_text_files'] = []
+
+  if options.type == 'android_apk' or options.type == 'resource_rewriter':
+    config['resources']['extra_package_names'] = [
+        c['package_name'] for c in all_resources_deps if 'package_name' in c]
+    config['resources']['extra_r_text_files'] = [
+        c['r_text'] for c in all_resources_deps if 'r_text' in c]
+
+  if options.type in ['android_apk', 'deps_dex']:
+    deps_dex_files = [c['dex_path'] for c in all_library_deps]
+
+  proguard_enabled = options.proguard_enabled
+  if options.type == 'android_apk':
+    deps_info['proguard_enabled'] = proguard_enabled
+
+  if proguard_enabled:
+    deps_info['proguard_info'] = options.proguard_info
+    config['proguard'] = {}
+    proguard_config = config['proguard']
+    proguard_config['input_paths'] = [options.jar_path] + java_full_classpath
+
+  # An instrumentation test apk should exclude the dex files that are in the apk
+  # under test.
+  if options.type == 'android_apk' and options.tested_apk_config:
+    tested_apk_library_deps = tested_apk_deps.All('java_library')
+    tested_apk_deps_dex_files = [c['dex_path'] for c in tested_apk_library_deps]
+    # Include in the classpath classes that are added directly to the apk under
+    # test (those that are not a part of a java_library).
+    tested_apk_config = GetDepConfig(options.tested_apk_config)
+    javac_classpath.append(tested_apk_config['jar_path'])
+    # Exclude dex files from the test apk that exist within the apk under test.
+    deps_dex_files = [
+        p for p in deps_dex_files if p not in tested_apk_deps_dex_files]
+
+    expected_tested_package = tested_apk_config['package_name']
+    AndroidManifest(options.android_manifest).CheckInstrumentation(
+        expected_tested_package)
+    if tested_apk_config['proguard_enabled']:
+      assert proguard_enabled, ('proguard must be enabled for instrumentation'
+          ' apks if it\'s enabled for the tested apk')
+
+  # Dependencies for the final dex file of an apk or a 'deps_dex'.
+  if options.type in ['android_apk', 'deps_dex']:
+    config['final_dex'] = {}
+    dex_config = config['final_dex']
+    dex_config['dependency_dex_files'] = deps_dex_files
+
+  if options.type in ('java_binary', 'java_library', 'android_apk'):
+    config['javac']['classpath'] = javac_classpath
+    config['javac']['interface_classpath'] = [
+        _AsInterfaceJar(p) for p in javac_classpath]
+    config['java'] = {
+      'full_classpath': java_full_classpath
+    }
+
+  if options.type == 'android_apk':
+    dependency_jars = [c['jar_path'] for c in all_library_deps]
+    all_interface_jars = [
+        _AsInterfaceJar(p) for p in dependency_jars + [options.jar_path]]
+    config['dist_jar'] = {
+      'dependency_jars': dependency_jars,
+      'all_interface_jars': all_interface_jars,
+    }
+    manifest = AndroidManifest(options.android_manifest)
+    deps_info['package_name'] = manifest.GetPackageName()
+    if not options.tested_apk_config and manifest.GetInstrumentation():
+      # A non-test apk that declares instrumentation must target only itself.
+      manifest.CheckInstrumentation(manifest.GetPackageName())
+
+    library_paths = []
+    java_libraries_list_holder = [None]
+    libraries = build_utils.ParseGypList(options.native_libs or '[]')
+    if libraries:
+      def recompute_ordered_libraries():
+        libraries_dir = os.path.dirname(libraries[0])
+        write_ordered_libraries.SetReadelfPath(options.readelf_path)
+        write_ordered_libraries.SetLibraryDirs([libraries_dir])
+        all_deps = (
+            write_ordered_libraries.GetSortedTransitiveDependenciesForBinaries(
+                libraries))
+        # Create a java literal array with the "base" library names:
+        # e.g. libfoo.so -> foo
+        java_libraries_list_holder[0] = ('{%s}' % ','.join(
+            ['"%s"' % s[3:-3] for s in all_deps]))
+        library_paths.extend(
+            write_ordered_libraries.FullLibraryPath(x) for x in all_deps)
+
+      # This step takes about 600ms on a z620 for chrome_apk, so it's worth
+      # caching.
+      md5_check.CallAndRecordIfStale(
+          recompute_ordered_libraries,
+          record_path=options.build_config + '.nativelibs.md5.stamp',
+          input_paths=libraries,
+          output_paths=[options.build_config])
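+      # If the inputs were unchanged, recompute_ordered_libraries was not
+      # run above; reload the previously computed values from the existing
+      # build_config on disk.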
+      if not library_paths:
+        prev_config = build_utils.ReadJson(options.build_config)
+        java_libraries_list_holder[0] = (
+            prev_config['native']['java_libraries_list'])
+        library_paths.extend(prev_config['native']['libraries'])
+
+    all_inputs.extend(library_paths)
+    config['native'] = {
+      'libraries': library_paths,
+      'java_libraries_list': java_libraries_list_holder[0],
+    }
+    config['assets'], config['uncompressed_assets'] = (
+        _MergeAssets(deps.All('android_assets')))
+
+  build_utils.WriteJson(config, options.build_config, only_if_changed=True)
+
+  if options.depfile:
+    build_utils.WriteDepfile(options.depfile, all_inputs)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/gyp/write_ordered_libraries.py b/build/android/gyp/write_ordered_libraries.py
new file mode 100755
index 0000000..0fc9a8c
--- /dev/null
+++ b/build/android/gyp/write_ordered_libraries.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Writes dependency ordered list of native libraries.
+
+The list excludes any Android system libraries, as those are not bundled with
+the APK.
+
+This list of libraries is used for several steps of building an APK.
+In the component build, --input-libraries only needs to list the top-level
+library (e.g. libcontent_shell_content_view). The script then uses readelf to
+inspect the shared libraries and determine the full list of (non-system)
+libraries that should be included in the APK.
+"""
+
+# TODO(cjhopman): See if we can expose the list of library dependencies from
+# gyp, rather than calculating it ourselves.
+# http://crbug.com/225558
+
+import optparse
+import os
+import re
+import sys
+
+from util import build_utils
+
+_readelf = None
+_library_dirs = None
+
+_library_re = re.compile(
+    '.*NEEDED.*Shared library: \[(?P<library_name>.+)\]')
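+# Matches "readelf -d" dynamic section entries such as (illustrative):
+#   0x00000001 (NEEDED)  Shared library: [libm.so]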
+
+
+def SetReadelfPath(path):
+  global _readelf
+  _readelf = path
+
+
+def SetLibraryDirs(dirs):
+  global _library_dirs
+  _library_dirs = dirs
+
+
+def FullLibraryPath(library_name):
+  assert _library_dirs is not None
+  for directory in _library_dirs:
+    path = '%s/%s' % (directory, library_name)
+    if os.path.exists(path):
+      return path
+  return library_name
+
+
+def IsSystemLibrary(library_name):
+  # If the library doesn't exist in the libraries directory, assume that it is
+  # an Android system library.
+  return not os.path.exists(FullLibraryPath(library_name))
+
+
+def CallReadElf(library_or_executable):
+  assert _readelf is not None
+  readelf_cmd = [_readelf,
+                 '-d',
+                 FullLibraryPath(library_or_executable)]
+  return build_utils.CheckOutput(readelf_cmd)
+
+
+def GetDependencies(library_or_executable):
+  elf = CallReadElf(library_or_executable)
+  return set(_library_re.findall(elf))
+
+
+def GetNonSystemDependencies(library_name):
+  all_deps = GetDependencies(library_name)
+  return set((lib for lib in all_deps if not IsSystemLibrary(lib)))
+
+
+def GetSortedTransitiveDependencies(libraries):
+  """Returns all transitive library dependencies in dependency order."""
+  return build_utils.GetSortedTransitiveDependencies(
+      libraries, GetNonSystemDependencies)
+
+
+def GetSortedTransitiveDependenciesForBinaries(binaries):
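+  """Returns the (non-system) transitive deps of |binaries| in dependency order.
+
+  |binaries| is either a list of shared libraries (*.so) or a single
+  executable whose NEEDED entries seed the search.
+  """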
+  if binaries[0].endswith('.so'):
+    libraries = [os.path.basename(lib) for lib in binaries]
+  else:
+    assert len(binaries) == 1
+    all_deps = GetDependencies(binaries[0])
+    libraries = [lib for lib in all_deps if not IsSystemLibrary(lib)]
+
+  return GetSortedTransitiveDependencies(libraries)
+
+
+def main():
+  parser = optparse.OptionParser()
+  build_utils.AddDepfileOption(parser)
+
+  parser.add_option('--input-libraries',
+      help='A list of top-level input libraries.')
+  parser.add_option('--libraries-dir',
+      help='The directory which contains shared libraries.')
+  parser.add_option('--readelf', help='Path to the readelf binary.')
+  parser.add_option('--output', help='Path to the generated .json file.')
+  parser.add_option('--stamp', help='Path to touch on success.')
+
+  options, _ = parser.parse_args()
+
+  SetReadelfPath(options.readelf)
+  SetLibraryDirs(options.libraries_dir.split(','))
+
+  libraries = build_utils.ParseGypList(options.input_libraries)
+  if libraries:
+    libraries = GetSortedTransitiveDependenciesForBinaries(libraries)
+
+  # Convert to "base" library names: e.g. libfoo.so -> foo
+  java_libraries_list = (
+      '{%s}' % ','.join(['"%s"' % s[3:-3] for s in libraries]))
+
+  out_json = {
+      'libraries': libraries,
+      'lib_paths': [FullLibraryPath(l) for l in libraries],
+      'java_libraries_list': java_libraries_list
+      }
+  build_utils.WriteJson(
+      out_json,
+      options.output,
+      only_if_changed=True)
+
+  if options.stamp:
+    build_utils.Touch(options.stamp)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        libraries + build_utils.GetPythonDependencies())
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/gyp/zip.py b/build/android/gyp/zip.py
new file mode 100755
index 0000000..51322df
--- /dev/null
+++ b/build/android/gyp/zip.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Archives a set of files.
+"""
+
+import optparse
+import sys
+
+from util import build_utils
+
+def main():
+  parser = optparse.OptionParser()
+  parser.add_option('--input-dir', help='Directory of files to archive.')
+  parser.add_option('--output', help='Path to output archive.')
+  options, _ = parser.parse_args()
+
+  inputs = build_utils.FindInDirectory(options.input_dir, '*')
+  build_utils.DoZip(inputs, options.output, options.input_dir)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/host_heartbeat.py b/build/android/host_heartbeat.py
new file mode 100755
index 0000000..8990592
--- /dev/null
+++ b/build/android/host_heartbeat.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Sends a heart beat pulse to the currently online Android devices.
+This heart beat lets the devices know that they are connected to a host.
+"""
+# pylint: disable=W0702
+
+import sys
+import time
+
+import devil_chromium
+from devil.android import device_utils
+
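+# Seconds to sleep between heartbeat pulses.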
+PULSE_PERIOD = 20
+
+def main():
+  devil_chromium.Initialize()
+
+  while True:
+    try:
+      devices = device_utils.DeviceUtils.HealthyDevices(blacklist=None)
+      for d in devices:
+        d.RunShellCommand(['touch', '/sdcard/host_heartbeat'],
+                          check_return=True)
+    except:
+      # Keep the heartbeat running, ignoring all errors.
+      pass
+    time.sleep(PULSE_PERIOD)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/increase_size_for_speed.gypi b/build/android/increase_size_for_speed.gypi
new file mode 100644
index 0000000..c5600b1
--- /dev/null
+++ b/build/android/increase_size_for_speed.gypi
@@ -0,0 +1,42 @@
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included to optimize a target for speed
+# rather than for size on Android.
+# This is used in some carefully tailored targets and is not meant
+# to be included everywhere. Before adding the template to another target,
+# please ask in chromium-dev@. See crbug.com/411909
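+#
+# Illustrative usage from a target's .gyp file (the relative path will vary):
+#   'includes': [ '../build/android/increase_size_for_speed.gypi' ],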
+
+{
+  'configurations': {
+    'Release': {
+      'target_conditions': [
+        ['_toolset=="target"', {
+          'conditions': [
+            ['OS=="android"', {
+              'cflags!': ['-Os'],
+              'cflags': ['-O2'],
+            }],
+            # Do not mix -Os and -O2 within a single GCC LTO link.
+            # LTO merges all optimization options at link time, and -O2
+            # takes precedence over -Os, so -Os and -O2 parts must not be
+            # LTO'ed together.
+            ['OS=="android" and clang==0 and use_lto==1', {
+              'cflags!': [
+                '-flto',
+                '-ffat-lto-objects',
+              ],
+            }],
+            ['OS=="android" and clang==0 and use_lto_o2==1', {
+              'cflags': [
+                '-flto',
+                '-ffat-lto-objects',
+              ],
+            }],
+          ],
+        }],
+      ],
+    },
+  },
+}
diff --git a/build/android/incremental_install/BUILD.gn b/build/android/incremental_install/BUILD.gn
new file mode 100644
index 0000000..3bb4696
--- /dev/null
+++ b/build/android/incremental_install/BUILD.gn
@@ -0,0 +1,19 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/android/rules.gni")
+
+android_library("bootstrap_java") {
+  # Use .dex rather than .dex.jar to be usable by package_apk().
+  dex_path = "$target_gen_dir/bootstrap.dex"
+  java_files = [
+    "java/org/chromium/incrementalinstall/BootstrapApplication.java",
+    "java/org/chromium/incrementalinstall/BootstrapInstrumentation.java",
+    "java/org/chromium/incrementalinstall/ClassLoaderPatcher.java",
+    "java/org/chromium/incrementalinstall/LockFile.java",
+    "java/org/chromium/incrementalinstall/Reflect.java",
+  ]
+  emma_never_instrument = true
+  run_findbugs_override = false
+}
diff --git a/build/android/incremental_install/__init__.py b/build/android/incremental_install/__init__.py
new file mode 100644
index 0000000..1aaf0e1
--- /dev/null
+++ b/build/android/incremental_install/__init__.py
@@ -0,0 +1,4 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
diff --git a/build/android/incremental_install/create_install_script.py b/build/android/incremental_install/create_install_script.py
new file mode 100755
index 0000000..5be4fe4
--- /dev/null
+++ b/build/android/incremental_install/create_install_script.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Creates a script to run an "_incremental" .apk."""
+
+import argparse
+import os
+import pprint
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'gyp'))
+
+from pylib.constants import host_paths
+from util import build_utils
+
+
+SCRIPT_TEMPLATE = """\
+#!/usr/bin/env python
+#
+# This file was generated by:
+#     //build/android/incremental_install/create_install_script.py
+
+import os
+import subprocess
+import sys
+
+
+def _ResolvePath(path):
+  script_directory = os.path.dirname(__file__)
+  return os.path.abspath(os.path.join(script_directory, path))
+
+
+# Exported to allow test runner to be able to install incremental apks.
+def GetInstallParameters():
+  apk_path = {apk_path}
+  dex_files = {dex_files}
+  dont_even_try = {dont_even_try}
+  native_libs = {native_libs}
+  show_proguard_warning = {show_proguard_warning}
+  splits = {splits}
+
+  return dict(apk_path=_ResolvePath(apk_path),
+              dex_files=[_ResolvePath(p) for p in dex_files],
+              dont_even_try=dont_even_try,
+              native_libs=[_ResolvePath(p) for p in native_libs],
+              show_proguard_warning=show_proguard_warning,
+              splits=[_ResolvePath(p) for p in splits])
+
+
+def main():
+  output_directory = {output_directory}
+  cmd_path = {cmd_path}
+  params = GetInstallParameters()
+  cmd_args = [
+      _ResolvePath(cmd_path),
+      '--output-directory', _ResolvePath(output_directory),
+  ]
+  for native_lib in params['native_libs']:
+    cmd_args.extend(('--native_lib', native_lib))
+  for dex_path in params['dex_files']:
+    cmd_args.extend(('--dex-file', dex_path))
+  for split in params['splits']:
+    cmd_args.extend(('--split', split))
+  cmd_args.append(params['apk_path'])
+  if params['dont_even_try']:
+    cmd_args.extend(('--dont-even-try', params['dont_even_try']))
+  if params['show_proguard_warning']:
+    cmd_args.append('--show-proguard-warning')
+  return subprocess.call(cmd_args + sys.argv[1:])
+
+if __name__ == '__main__':
+  sys.exit(main())
+"""
+
+
+def _ParseArgs(args):
+  args = build_utils.ExpandFileArgs(args)
+  parser = argparse.ArgumentParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_argument('--script-output-path',
+                      help='Output path for executable script.',
+                      required=True)
+  parser.add_argument('--output-directory',
+                      help='Path to the root build directory.',
+                      default='.')
+  parser.add_argument('--apk-path',
+                      help='Path to the .apk to install.',
+                      required=True)
+  parser.add_argument('--split',
+                      action='append',
+                      dest='splits',
+                      default=[],
+                      help='A glob matching the apk splits. '
+                           'Can be specified multiple times.')
+  parser.add_argument('--native-libs',
+                      action='append',
+                      default=[],
+                      help='GYP-list of paths to native libraries. Can be '
+                      'repeated.')
+  parser.add_argument('--dex-file',
+                      action='append',
+                      default=[],
+                      dest='dex_files',
+                      help='List of dex files to include.')
+  parser.add_argument('--dex-file-list',
+                      help='GYP-list of dex files.')
+  parser.add_argument('--show-proguard-warning',
+                      action='store_true',
+                      default=False,
+                      help='Print a warning about proguard being disabled')
+  parser.add_argument('--dont-even-try',
+                      help='Prints this message and exits.')
+
+  options = parser.parse_args(args)
+  options.dex_files += build_utils.ParseGypList(options.dex_file_list)
+  all_libs = []
+  for gyp_list in options.native_libs:
+    all_libs.extend(build_utils.ParseGypList(gyp_list))
+  options.native_libs = all_libs
+  return options
+
+
+def main(args):
+  options = _ParseArgs(args)
+
+  def relativize(path):
+    script_dir = os.path.dirname(options.script_output_path)
+    return path and os.path.relpath(path, script_dir)
+
+  installer_path = os.path.join(host_paths.DIR_SOURCE_ROOT, 'build', 'android',
+                                'incremental_install', 'installer.py')
+  pformat = pprint.pformat
+  template_args = {
+      'cmd_path': pformat(relativize(installer_path)),
+      'apk_path': pformat(relativize(options.apk_path)),
+      'output_directory': pformat(relativize(options.output_directory)),
+      'native_libs': pformat([relativize(p) for p in options.native_libs]),
+      'dex_files': pformat([relativize(p) for p in options.dex_files]),
+      'dont_even_try': pformat(options.dont_even_try),
+      'show_proguard_warning': pformat(options.show_proguard_warning),
+      'splits': pformat([relativize(p) for p in options.splits]),
+  }
+
+  with open(options.script_output_path, 'w') as script:
+    script.write(SCRIPT_TEMPLATE.format(**template_args))
+
+  os.chmod(options.script_output_path, 0750)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        build_utils.GetPythonDependencies())
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/incremental_install/generate_android_manifest.py b/build/android/incremental_install/generate_android_manifest.py
new file mode 100755
index 0000000..163b4c3
--- /dev/null
+++ b/build/android/incremental_install/generate_android_manifest.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Creates an AndroidManifest.xml for an incremental APK.
+
+Given the manifest file for the real APK, generates an AndroidManifest.xml with
+the application class changed to IncrementalApplication.
+"""
+
+import argparse
+import os
+import sys
+from xml.etree import ElementTree
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir, 'gyp'))
+from util import build_utils
+
+_ANDROID_NAMESPACE = 'http://schemas.android.com/apk/res/android'
+ElementTree.register_namespace('android', _ANDROID_NAMESPACE)
+
+_INCREMENTAL_APP_NAME = 'org.chromium.incrementalinstall.BootstrapApplication'
+_META_DATA_APP_NAME = 'incremental-install-real-app'
+_META_DATA_INSTRUMENTATION_NAME = 'incremental-install-real-instrumentation'
+_DEFAULT_APPLICATION_CLASS = 'android.app.Application'
+_DEFAULT_INSTRUMENTATION_CLASS = 'android.app.Instrumentation'
+
+
+def _AddNamespace(name):
+  """Adds the android namespace prefix to the given identifier."""
+  return '{%s}%s' % (_ANDROID_NAMESPACE, name)
+
+def _ParseArgs():
+  parser = argparse.ArgumentParser()
+  build_utils.AddDepfileOption(parser)
+  parser.add_argument('--src-manifest',
+                      help='The main manifest of the app',
+                      required=True)
+  parser.add_argument('--out-manifest',
+                      help='The output manifest',
+                      required=True)
+  parser.add_argument('--disable-isolated-processes',
+                      help='Changes all android:isolatedProcess to false. '
+                           'This is required on Android M+.',
+                      action='store_true')
+  return parser.parse_args()
+
+
+def _CreateMetaData(parent, name, value):
+  meta_data_node = ElementTree.SubElement(parent, 'meta-data')
+  meta_data_node.set(_AddNamespace('name'), name)
+  meta_data_node.set(_AddNamespace('value'), value)
+
+
+def _ProcessManifest(main_manifest, disable_isolated_processes):
+  """Returns a transformed AndroidManifest.xml for use with _incremental apks.
+
+  Args:
+    main_manifest: Manifest contents to transform.
+    disable_isolated_processes: Whether to set all isolatedProcess attributes
+        to false.
+
+  Returns:
+    The transformed AndroidManifest.xml.
+  """
+  if disable_isolated_processes:
+    main_manifest = main_manifest.replace('isolatedProcess="true"',
+                                          'isolatedProcess="false"')
+
+  doc = ElementTree.fromstring(main_manifest)
+  app_node = doc.find('application')
+  if app_node is None:
+    app_node = ElementTree.SubElement(doc, 'application')
+
+  real_app_class = app_node.get(_AddNamespace('name'),
+                                _DEFAULT_APPLICATION_CLASS)
+  app_node.set(_AddNamespace('name'), _INCREMENTAL_APP_NAME)
+  _CreateMetaData(app_node, _META_DATA_APP_NAME, real_app_class)
+
+  # Seems to be a bug in ElementTree, as doc.find() doesn't work here.
+  instrumentation_nodes = doc.findall('instrumentation')
+  if instrumentation_nodes:
+    instrumentation_node = instrumentation_nodes[0]
+    real_instrumentation_class = instrumentation_node.get(_AddNamespace('name'))
+    instrumentation_node.set(_AddNamespace('name'),
+                             _DEFAULT_INSTRUMENTATION_CLASS)
+    _CreateMetaData(app_node, _META_DATA_INSTRUMENTATION_NAME,
+                    real_instrumentation_class)
+
+  return ElementTree.tostring(doc, encoding='UTF-8')
+
+
+def main():
+  options = _ParseArgs()
+  with open(options.src_manifest) as f:
+    main_manifest_data = f.read()
+  new_manifest_data = _ProcessManifest(main_manifest_data,
+                                       options.disable_isolated_processes)
+  with open(options.out_manifest, 'w') as f:
+    f.write(new_manifest_data)
+
+  if options.depfile:
+    build_utils.WriteDepfile(
+        options.depfile,
+        [options.src_manifest] + build_utils.GetPythonDependencies())
+
+
+if __name__ == '__main__':
+  main()
diff --git a/build/android/incremental_install/installer.py b/build/android/incremental_install/installer.py
new file mode 100755
index 0000000..6c42911
--- /dev/null
+++ b/build/android/incremental_install/installer.py
@@ -0,0 +1,317 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Install *_incremental.apk targets as well as their dependent files."""
+
+import argparse
+import glob
+import logging
+import os
+import posixpath
+import shutil
+import sys
+import zipfile
+
+sys.path.append(
+    os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
+import devil_chromium
+from devil.android import apk_helper
+from devil.android import device_utils
+from devil.android import device_errors
+from devil.android.sdk import version_codes
+from devil.utils import reraiser_thread
+from pylib import constants
+from pylib.utils import run_tests_helper
+from pylib.utils import time_profile
+
+prev_sys_path = list(sys.path)
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, 'gyp'))
+from util import build_utils
+sys.path = prev_sys_path
+
+
+def _DeviceCachePath(device):
+  file_name = 'device_cache_%s.json' % device.adb.GetDeviceSerial()
+  return os.path.join(constants.GetOutDirectory(), file_name)
+
+
+def _TransformDexPaths(paths):
+  """Given paths like ["/a/b/c", "/a/c/d"], returns ["b.c", "c.d"]."""
+  if len(paths) == 1:
+    return [os.path.basename(paths[0])]
+
+  prefix_len = len(os.path.commonprefix(paths))
+  return [p[prefix_len:].replace(os.sep, '.') for p in paths]
+
+
+def _Execute(concurrently, *funcs):
+  """Calls all functions in |funcs| concurrently or in sequence."""
+  timer = time_profile.TimeProfile()
+  if concurrently:
+    reraiser_thread.RunAsync(funcs)
+  else:
+    for f in funcs:
+      f()
+  timer.Stop(log=False)
+  return timer
+
+
+def _GetDeviceIncrementalDir(package):
+  """Returns the device path to put incremental files for the given package."""
+  return '/data/local/tmp/incremental-app-%s' % package
+
+
+def _HasClasses(jar_path):
+  """Returns whether the given jar contains classes.dex."""
+  with zipfile.ZipFile(jar_path) as jar:
+    return 'classes.dex' in jar.namelist()
+
+
+def Uninstall(device, package, enable_device_cache=False):
+  """Uninstalls and removes all incremental files for the given package."""
+  main_timer = time_profile.TimeProfile()
+  device.Uninstall(package)
+  if enable_device_cache:
+    # Uninstall is rare, so just wipe the cache in this case.
+    cache_path = _DeviceCachePath(device)
+    if os.path.exists(cache_path):
+      os.unlink(cache_path)
+  device.RunShellCommand(['rm', '-rf', _GetDeviceIncrementalDir(package)],
+                         check_return=True)
+  logging.info('Uninstall took %s seconds.', main_timer.GetDelta())
+
+
+def Install(device, apk, split_globs=None, native_libs=None, dex_files=None,
+            enable_device_cache=False, use_concurrency=True,
+            show_proguard_warning=False, permissions=(),
+            allow_downgrade=True):
+  """Installs the given incremental apk and all required supporting files.
+
+  Args:
+    device: A DeviceUtils instance.
+    apk: The path to the apk, or an ApkHelper instance.
+    split_globs: Glob patterns for any required apk splits (optional).
+    native_libs: List of app's native libraries (optional).
+    dex_files: List of .dex.jar files that comprise the app's Dalvik code.
+    enable_device_cache: Whether to enable on-device caching of checksums.
+    use_concurrency: Whether to speed things up using multiple threads.
+    show_proguard_warning: Whether to print a warning about Proguard not being
+        enabled after installing.
+    permissions: A list of the permissions to grant, or None to grant all
+                 non-blacklisted permissions in the manifest.
+    allow_downgrade: Whether to allow installing an apk whose version code
+        is lower than the one already installed on the device.
+  """
+  main_timer = time_profile.TimeProfile()
+  install_timer = time_profile.TimeProfile()
+  push_native_timer = time_profile.TimeProfile()
+  push_dex_timer = time_profile.TimeProfile()
+
+  apk = apk_helper.ToHelper(apk)
+  apk_package = apk.GetPackageName()
+  device_incremental_dir = _GetDeviceIncrementalDir(apk_package)
+
+  # Install .apk(s) if any of them have changed.
+  def do_install():
+    install_timer.Start()
+    if split_globs:
+      splits = []
+      for split_glob in split_globs:
+        splits.extend(glob.glob(split_glob))
+      device.InstallSplitApk(apk, splits, reinstall=True,
+                             allow_cached_props=True, permissions=permissions,
+                             allow_downgrade=allow_downgrade)
+    else:
+      device.Install(apk, reinstall=True, permissions=permissions,
+                     allow_downgrade=allow_downgrade)
+    install_timer.Stop(log=False)
+
+  # Push .so and .dex files to the device (if they have changed).
+  def do_push_files():
+    if native_libs:
+      push_native_timer.Start()
+      with build_utils.TempDir() as temp_dir:
+        device_lib_dir = posixpath.join(device_incremental_dir, 'lib')
+        for path in native_libs:
+          # Note: Can't use symlinks as they don't work when
+          # "adb push parent_dir" is used (like we do here).
+          shutil.copy(path, os.path.join(temp_dir, os.path.basename(path)))
+        device.PushChangedFiles([(temp_dir, device_lib_dir)],
+                                delete_device_stale=True)
+      push_native_timer.Stop(log=False)
+
+    if dex_files:
+      push_dex_timer.Start()
+      # Put all .dex files to be pushed into a temporary directory so that we
+      # can use delete_device_stale=True.
+      with build_utils.TempDir() as temp_dir:
+        device_dex_dir = posixpath.join(device_incremental_dir, 'dex')
+        # Ensure no two files have the same name.
+        transformed_names = _TransformDexPaths(dex_files)
+        for src_path, dest_name in zip(dex_files, transformed_names):
+          # Binary targets with no extra classes create .dex.jar without a
+          # classes.dex (which Android chokes on).
+          if _HasClasses(src_path):
+            shutil.copy(src_path, os.path.join(temp_dir, dest_name))
+        device.PushChangedFiles([(temp_dir, device_dex_dir)],
+                                delete_device_stale=True)
+      push_dex_timer.Stop(log=False)
+
+  def check_selinux():
+    # On Marshmallow, SELinux denies the app filesystem access to the
+    # side-loaded files entirely. It might be possible to get things working
+    # on Lollipop, but attempts so far have failed.
+    # http://crbug.com/558818
+    has_selinux = device.build_version_sdk >= version_codes.LOLLIPOP
+    if has_selinux and apk.HasIsolatedProcesses():
+      raise Exception('Cannot use incremental installs on Android L+ without '
+                      'first disabling isolated processes.\n'
+                      'To do so, use GN arg:\n'
+                      '    disable_incremental_isolated_processes=true')
+
+  cache_path = _DeviceCachePath(device)
+  def restore_cache():
+    if not enable_device_cache:
+      logging.info('Ignoring device cache')
+      return
+    if os.path.exists(cache_path):
+      logging.info('Using device cache: %s', cache_path)
+      with open(cache_path) as f:
+        device.LoadCacheData(f.read())
+      # Delete the cached file so that any exceptions cause it to be cleared.
+      os.unlink(cache_path)
+    else:
+      logging.info('No device cache present: %s', cache_path)
+
+  def save_cache():
+    with open(cache_path, 'w') as f:
+      f.write(device.DumpCacheData())
+      logging.info('Wrote device cache: %s', cache_path)
+
+  # Create 2 lock files:
+  # * install.lock tells the app to pause on start-up (until we release it).
+  # * firstrun.lock is used by the app to pause all secondary processes until
+  #   the primary process finishes loading the .dex / .so files.
+  def create_lock_files():
+    # Creates or zeros out lock files.
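+    # (The "2>" redirection is just a compact way to create/truncate the
+    # second lock file with the same command.)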
+    cmd = ('D="%s";'
+           'mkdir -p $D &&'
+           'echo -n >$D/install.lock 2>$D/firstrun.lock')
+    device.RunShellCommand(cmd % device_incremental_dir, check_return=True)
+
+  # The firstrun.lock is released by the app itself.
+  def release_installer_lock():
+    device.RunShellCommand('echo > %s/install.lock' % device_incremental_dir,
+                           check_return=True)
+
+  # Concurrency here speeds things up quite a bit, but DeviceUtils hasn't
+  # been designed for multi-threading. Enabling only because this is a
+  # developer-only tool.
+  setup_timer = _Execute(
+      use_concurrency, create_lock_files, restore_cache, check_selinux)
+
+  _Execute(use_concurrency, do_install, do_push_files)
+
+  finalize_timer = _Execute(use_concurrency, release_installer_lock, save_cache)
+
+  logging.info(
+      'Took %s seconds (setup=%s, install=%s, libs=%s, dex=%s, finalize=%s)',
+      main_timer.GetDelta(), setup_timer.GetDelta(), install_timer.GetDelta(),
+      push_native_timer.GetDelta(), push_dex_timer.GetDelta(),
+      finalize_timer.GetDelta())
+  if show_proguard_warning:
+    logging.warning('Target had proguard enabled, but incremental install uses '
+                    'non-proguarded .dex files. Performance characteristics '
+                    'may differ.')
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('apk_path',
+                      help='The path to the APK to install.')
+  parser.add_argument('--split',
+                      action='append',
+                      dest='splits',
+                      help='A glob matching the apk splits. '
+                           'Can be specified multiple times.')
+  parser.add_argument('--native_lib',
+                      dest='native_libs',
+                      help='Path to native library (repeatable)',
+                      action='append',
+                      default=[])
+  parser.add_argument('--dex-file',
+                      dest='dex_files',
+                      help='Path to dex files (repeatable)',
+                      action='append',
+                      default=[])
+  parser.add_argument('-d', '--device', dest='device',
+                      help='Target device for apk to install on.')
+  parser.add_argument('--uninstall',
+                      action='store_true',
+                      default=False,
+                      help='Remove the app and all side-loaded files.')
+  parser.add_argument('--output-directory',
+                      help='Path to the root build directory.')
+  parser.add_argument('--no-threading',
+                      action='store_false',
+                      default=True,
+                      dest='threading',
+                      help='Do not install and push concurrently')
+  parser.add_argument('--no-cache',
+                      action='store_false',
+                      default=True,
+                      dest='cache',
+                      help='Do not use cached information about what files are '
+                           'currently on the target device.')
+  parser.add_argument('--show-proguard-warning',
+                      action='store_true',
+                      default=False,
+                      help='Print a warning about proguard being disabled')
+  parser.add_argument('--dont-even-try',
+                      help='Prints this message and exits.')
+  parser.add_argument('-v',
+                      '--verbose',
+                      dest='verbose_count',
+                      default=0,
+                      action='count',
+                      help='Verbose level (multiple times for more)')
+  parser.add_argument('--disable-downgrade',
+                      action='store_false',
+                      default=True,
+                      dest='allow_downgrade',
+                      help='Disable install of apk with lower version number '
+                           'than the version already on the device.')
+
+  args = parser.parse_args()
+
+  run_tests_helper.SetLogLevel(args.verbose_count)
+  constants.SetBuildType('Debug')
+  if args.output_directory:
+    constants.SetOutputDirectory(args.output_directory)
+
+  devil_chromium.Initialize(output_directory=constants.GetOutDirectory())
+
+  if args.dont_even_try:
+    logging.fatal(args.dont_even_try)
+    return 1
+
+  # Retries are annoying when commands fail for legitimate reasons. Might want
+  # to enable them if this is ever used on bots though.
+  device = device_utils.DeviceUtils.HealthyDevices(
+      device_arg=args.device,
+      default_retries=0,
+      enable_device_files_cache=True)[0]
+
+  apk = apk_helper.ToHelper(args.apk_path)
+  if args.uninstall:
+    Uninstall(device, apk.GetPackageName(), enable_device_cache=args.cache)
+  else:
+    Install(device, apk, split_globs=args.splits, native_libs=args.native_libs,
+            dex_files=args.dex_files, enable_device_cache=args.cache,
+            use_concurrency=args.threading,
+            show_proguard_warning=args.show_proguard_warning,
+            allow_downgrade=args.allow_downgrade)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/incremental_install/java/org/chromium/incrementalinstall/BootstrapApplication.java b/build/android/incremental_install/java/org/chromium/incrementalinstall/BootstrapApplication.java
new file mode 100644
index 0000000..1fb5e40
--- /dev/null
+++ b/build/android/incremental_install/java/org/chromium/incrementalinstall/BootstrapApplication.java
@@ -0,0 +1,282 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.incrementalinstall;
+
+import android.app.Application;
+import android.app.Instrumentation;
+import android.content.ComponentName;
+import android.content.Context;
+import android.content.pm.ApplicationInfo;
+import android.content.pm.PackageManager;
+import android.content.pm.PackageManager.NameNotFoundException;
+import android.os.Bundle;
+import android.util.Log;
+
+import java.io.File;
+import java.lang.ref.WeakReference;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * An Application that replaces itself with another Application (as defined in
+ * an AndroidManifest.xml meta-data tag). It loads the other application only
+ * after side-loading its .so and .dex files from /data/local/tmp.
+ *
+ * This class is highly dependent on the private implementation details of
+ * Android's ActivityThread.java. However, it has been tested to work with
+ * JellyBean through Marshmallow.
+ */
+public final class BootstrapApplication extends Application {
+    private static final String TAG = "cr.incrementalinstall";
+    private static final String MANAGED_DIR_PREFIX = "/data/local/tmp/incremental-app-";
+    private static final String REAL_APP_META_DATA_NAME = "incremental-install-real-app";
+    private static final String REAL_INSTRUMENTATION_META_DATA_NAME =
+            "incremental-install-real-instrumentation";
+
+    private ClassLoaderPatcher mClassLoaderPatcher;
+    private Application mRealApplication;
+    private Instrumentation mOrigInstrumentation;
+    private Instrumentation mRealInstrumentation;
+    private Object mStashedProviderList;
+    private Object mActivityThread;
+
+    @Override
+    protected void attachBaseContext(Context context) {
+        super.attachBaseContext(context);
+        try {
+            mActivityThread = Reflect.invokeMethod(Class.forName("android.app.ActivityThread"),
+                    "currentActivityThread");
+            mClassLoaderPatcher = new ClassLoaderPatcher(context);
+
+            mOrigInstrumentation =
+                    (Instrumentation) Reflect.getField(mActivityThread, "mInstrumentation");
+            Context instContext = mOrigInstrumentation.getContext();
+            if (instContext == null) {
+                instContext = context;
+            }
+
+            // When running with an instrumentation that lives in a different package from the
+            // application, we must load the dex files and native libraries from both packages.
+            // This logic likely won't work when the instrumentation is incremental, but the app is
+            // non-incremental. This configuration isn't used right now though.
+            String appPackageName = getPackageName();
+            String instPackageName = instContext.getPackageName();
+            boolean instPackageNameDiffers = !appPackageName.equals(instPackageName);
+            Log.i(TAG, "App PackageName: " + appPackageName);
+            if (instPackageNameDiffers) {
+                Log.i(TAG, "Inst PackageName: " + instPackageName);
+            }
+
+            File appIncrementalRootDir = new File(MANAGED_DIR_PREFIX + appPackageName);
+            File appLibDir = new File(appIncrementalRootDir, "lib");
+            File appDexDir = new File(appIncrementalRootDir, "dex");
+            File appInstallLockFile = new File(appIncrementalRootDir, "install.lock");
+            File appFirstRunLockFile = new File(appIncrementalRootDir, "firstrun.lock");
+            File instIncrementalRootDir = new File(MANAGED_DIR_PREFIX + instPackageName);
+            File instLibDir = new File(instIncrementalRootDir, "lib");
+            File instDexDir = new File(instIncrementalRootDir, "dex");
+            File instInstallLockFile = new File(instIncrementalRootDir, "install.lock");
+            File instFirstRunLockFile = new File(instIncrementalRootDir, "firstrun.lock");
+
+            boolean isFirstRun = LockFile.installerLockExists(appFirstRunLockFile)
+                    || (instPackageNameDiffers
+                               && LockFile.installerLockExists(instFirstRunLockFile));
+            if (isFirstRun) {
+                if (mClassLoaderPatcher.mIsPrimaryProcess) {
+                    // Wait for incremental_install.py to finish.
+                    LockFile.waitForInstallerLock(appInstallLockFile, 30 * 1000);
+                    LockFile.waitForInstallerLock(instInstallLockFile, 30 * 1000);
+                } else {
+                    // Wait for the browser process to create the optimized dex files
+                    // and copy the library files.
+                    LockFile.waitForInstallerLock(appFirstRunLockFile, 60 * 1000);
+                    LockFile.waitForInstallerLock(instFirstRunLockFile, 60 * 1000);
+                }
+            }
+
+            mClassLoaderPatcher.importNativeLibs(instLibDir);
+            mClassLoaderPatcher.loadDexFiles(instDexDir);
+            if (instPackageNameDiffers) {
+                mClassLoaderPatcher.importNativeLibs(appLibDir);
+                mClassLoaderPatcher.loadDexFiles(appDexDir);
+            }
+
+            if (isFirstRun && mClassLoaderPatcher.mIsPrimaryProcess) {
+                LockFile.clearInstallerLock(appFirstRunLockFile);
+                if (instPackageNameDiffers) {
+                    LockFile.clearInstallerLock(instFirstRunLockFile);
+                }
+            }
+
+            // mInstrumentationAppDir is one of a set of fields that is initialized only when
+            // instrumentation is active.
+            if (Reflect.getField(mActivityThread, "mInstrumentationAppDir") != null) {
+                String realInstrumentationName =
+                        getClassNameFromMetadata(REAL_INSTRUMENTATION_META_DATA_NAME, instContext);
+                initInstrumentation(realInstrumentationName);
+            } else {
+                Log.i(TAG, "No instrumentation active.");
+            }
+
+            // Even when instrumentation is not enabled, ActivityThread uses a default
+            // Instrumentation instance internally. We hook it here in order to hook into the
+            // call to Instrumentation.onCreate().
+            Reflect.setField(mActivityThread, "mInstrumentation",
+                    new BootstrapInstrumentation(this));
+
+            // attachBaseContext() is called from ActivityThread#handleBindApplication() and
+            // Application#mApplication is changed right after we return. Thus, we cannot swap
+            // the Application instances until onCreate() is called.
+            String realApplicationName = getClassNameFromMetadata(REAL_APP_META_DATA_NAME, context);
+            Log.i(TAG, "Instantiating " + realApplicationName);
+            mRealApplication =
+                    (Application) Reflect.newInstance(Class.forName(realApplicationName));
+            Reflect.invokeMethod(mRealApplication, "attachBaseContext", context);
+
+            // Between attachBaseContext() and onCreate(), ActivityThread tries to instantiate
+            // all ContentProviders. The ContentProviders break without the correct Application
+            // class being installed, so temporarily pretend there are no providers, and then
+            // instantiate them explicitly within onCreate().
+            disableContentProviders();
+            Log.i(TAG, "Waiting for Instrumentation.onCreate");
+        } catch (Exception e) {
+            throw new RuntimeException("Incremental install failed.", e);
+        }
+    }
+
+    /**
+     * Returns the fully-qualified class name for the given key, stored in a
+     * &lt;meta-data&gt; tag within the manifest.
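+     *
+     * Illustrative entry (values are examples):
+     *   &lt;meta-data android:name="incremental-install-real-app"
+     *       android:value="com.example.FooApplication" /&gt;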
+     */
+    private static String getClassNameFromMetadata(String key, Context context)
+            throws NameNotFoundException {
+        String pkgName = context.getPackageName();
+        ApplicationInfo appInfo = context.getPackageManager().getApplicationInfo(pkgName,
+                PackageManager.GET_META_DATA);
+        String value = appInfo.metaData.getString(key);
+        if (value != null && !value.contains(".")) {
+            value = pkgName + "." + value;
+        }
+        return value;
+    }
+
+    /**
+     * Instantiates and initializes mRealInstrumentation (the real Instrumentation class).
+     */
+    private void initInstrumentation(String realInstrumentationName)
+            throws ReflectiveOperationException {
+        if (realInstrumentationName == null) {
+            // This is the case when an incremental app is used as a target for an instrumentation
+            // test. In this case, ActivityThread can instantiate the proper class just fine since
+            // it exists within the test apk (as opposed to the incremental apk-under-test).
+            Log.i(TAG, "Running with external instrumentation");
+            mRealInstrumentation = mOrigInstrumentation;
+            return;
+        }
+        // For unit tests, the instrumentation class is replaced in the manifest by a build step
+        // because ActivityThread tries to instantiate it before we get a chance to load the
+        // incremental dex files.
+        Log.i(TAG, "Instantiating instrumentation " + realInstrumentationName);
+        mRealInstrumentation = (Instrumentation) Reflect.newInstance(
+                Class.forName(realInstrumentationName));
+
+        // Initialize the fields that are set by Instrumentation.init().
+        String[] initFields = {"mThread", "mMessageQueue", "mInstrContext", "mAppContext",
+                "mWatcher", "mUiAutomationConnection"};
+        for (String fieldName : initFields) {
+            Reflect.setField(mRealInstrumentation, fieldName,
+                    Reflect.getField(mOrigInstrumentation, fieldName));
+        }
+        // But make sure the correct ComponentName is used.
+        ComponentName newName = new ComponentName(
+                mOrigInstrumentation.getComponentName().getPackageName(), realInstrumentationName);
+        Reflect.setField(mRealInstrumentation, "mComponent", newName);
+    }
+
+    /**
+     * Called by BootstrapInstrumentation from Instrumentation.onCreate().
+     * This happens regardless of whether or not instrumentation is enabled.
+     */
+    void onInstrumentationCreate(Bundle arguments) {
+        Log.i(TAG, "Instrumentation.onCreate() called. Swapping references.");
+        try {
+            swapApplicationReferences();
+            enableContentProviders();
+            if (mRealInstrumentation != null) {
+                Reflect.setField(mActivityThread, "mInstrumentation", mRealInstrumentation);
+                mRealInstrumentation.onCreate(arguments);
+            }
+        } catch (Exception e) {
+            throw new RuntimeException("Incremental install failed.", e);
+        }
+    }
+
+    @Override
+    public void onCreate() {
+        super.onCreate();
+        try {
+            Log.i(TAG, "Application.onCreate() called.");
+            mRealApplication.onCreate();
+        } catch (Exception e) {
+            throw new RuntimeException("Incremental install failed.", e);
+        }
+    }
+
+    /**
+     * Nulls out ActivityThread.mBoundApplication.providers.
+     */
+    private void disableContentProviders() throws ReflectiveOperationException {
+        Object data = Reflect.getField(mActivityThread, "mBoundApplication");
+        mStashedProviderList = Reflect.getField(data, "providers");
+        Reflect.setField(data, "providers", null);
+    }
+
+    /**
+     * Restores the value of ActivityThread.mBoundApplication.providers, and invokes
+     * ActivityThread#installContentProviders().
+     */
+    private void enableContentProviders() throws ReflectiveOperationException {
+        Object data = Reflect.getField(mActivityThread, "mBoundApplication");
+        Reflect.setField(data, "providers", mStashedProviderList);
+        if (mStashedProviderList != null && mClassLoaderPatcher.mIsPrimaryProcess) {
+            Log.i(TAG, "Instantiating content providers");
+            Reflect.invokeMethod(mActivityThread, "installContentProviders", mRealApplication,
+                    mStashedProviderList);
+        }
+        mStashedProviderList = null;
+    }
+
+    /**
+     * Changes all fields within framework classes that have stored a reference to this
+     * BootstrapApplication to instead store references to mRealApplication.
+     * @throws NoSuchFieldException
+     */
+    @SuppressWarnings("unchecked")
+    private void swapApplicationReferences() throws ReflectiveOperationException {
+        if (Reflect.getField(mActivityThread, "mInitialApplication") == this) {
+            Reflect.setField(mActivityThread, "mInitialApplication", mRealApplication);
+        }
+
+        List<Application> allApplications =
+                (List<Application>) Reflect.getField(mActivityThread, "mAllApplications");
+        for (int i = 0; i < allApplications.size(); i++) {
+            if (allApplications.get(i) == this) {
+                allApplications.set(i, mRealApplication);
+            }
+        }
+
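+        // ActivityThread caches LoadedApk instances in mPackages /
+        // mResourcePackages as WeakReferences; patch any entries that still
+        // point at this bootstrap Application so later lookups resolve to
+        // mRealApplication.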
+        for (String fieldName : new String[] { "mPackages", "mResourcePackages" }) {
+            Map<String, WeakReference<?>> packageMap =
+                    (Map<String, WeakReference<?>>) Reflect.getField(mActivityThread, fieldName);
+            for (Map.Entry<String, WeakReference<?>> entry : packageMap.entrySet()) {
+                Object loadedApk = entry.getValue().get();
+                if (loadedApk != null && Reflect.getField(loadedApk, "mApplication") == this) {
+                    Reflect.setField(loadedApk, "mApplication", mRealApplication);
+                    Reflect.setField(mRealApplication, "mLoadedApk", loadedApk);
+                }
+            }
+        }
+    }
+}
diff --git a/build/android/incremental_install/java/org/chromium/incrementalinstall/BootstrapInstrumentation.java b/build/android/incremental_install/java/org/chromium/incrementalinstall/BootstrapInstrumentation.java
new file mode 100644
index 0000000..f197406
--- /dev/null
+++ b/build/android/incremental_install/java/org/chromium/incrementalinstall/BootstrapInstrumentation.java
@@ -0,0 +1,25 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.incrementalinstall;
+
+import android.app.Instrumentation;
+import android.os.Bundle;
+
+/**
+ * Notifies BootstrapApplication of the call to Instrumentation.onCreate().
+ */
+public final class BootstrapInstrumentation extends Instrumentation {
+    private final BootstrapApplication mApp;
+
+    BootstrapInstrumentation(BootstrapApplication app) {
+        mApp = app;
+    }
+
+    @Override
+    public void onCreate(Bundle arguments) {
+        super.onCreate(arguments);
+        mApp.onInstrumentationCreate(arguments);
+    }
+}
diff --git a/build/android/incremental_install/java/org/chromium/incrementalinstall/ClassLoaderPatcher.java b/build/android/incremental_install/java/org/chromium/incrementalinstall/ClassLoaderPatcher.java
new file mode 100644
index 0000000..ac51be9
--- /dev/null
+++ b/build/android/incremental_install/java/org/chromium/incrementalinstall/ClassLoaderPatcher.java
@@ -0,0 +1,246 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.incrementalinstall;
+
+import android.content.Context;
+import android.os.Build;
+import android.util.Log;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Provides the ability to add native libraries and .dex files to an existing class loader.
+ * Tested with Jelly Bean MR2 through Marshmallow.
+ */
+final class ClassLoaderPatcher {
+    private static final String TAG = "cr.incrementalinstall";
+    private final File mAppFilesSubDir;
+    private final ClassLoader mClassLoader;
+    private final Object mLibcoreOs;
+    private final int mProcessUid;
+    final boolean mIsPrimaryProcess;
+
+    ClassLoaderPatcher(Context context) throws ReflectiveOperationException {
+        mAppFilesSubDir =
+                new File(context.getApplicationInfo().dataDir, "incremental-install-files");
+        mClassLoader = context.getClassLoader();
+        mLibcoreOs = Reflect.getField(Class.forName("libcore.io.Libcore"), "os");
+        mProcessUid = (Integer) Reflect.invokeMethod(mLibcoreOs, "getuid");
+        mIsPrimaryProcess = context.getApplicationInfo().uid == mProcessUid;
+        Log.i(TAG, "uid=" + mProcessUid + " (isPrimary=" + mIsPrimaryProcess + ")");
+    }
+
+    /**
+     * Loads all dex files within |dexDir| into the app's ClassLoader.
+     */
+    void loadDexFiles(File dexDir) throws ReflectiveOperationException, FileNotFoundException {
+        Log.i(TAG, "Installing dex files from: " + dexDir);
+        File[] dexFilesArr = dexDir.listFiles();
+        if (dexFilesArr == null) {
+            throw new FileNotFoundException("Dex dir does not exist: " + dexDir);
+        }
+        // The optimized dex files will be owned by this process' user.
+        // Store them within the app's data dir rather than on /data/local/tmp
+        // so that they are still deleted (by the OS) when we uninstall
+        // (even on a non-rooted device).
+        File incrementalDexesDir = new File(mAppFilesSubDir, "optimized-dexes");
+        File isolatedDexesDir = new File(mAppFilesSubDir, "isolated-dexes");
+        File optimizedDir;
+
+        if (mIsPrimaryProcess) {
+            ensureAppFilesSubDirExists();
+            // Allows isolated processes to access the same files.
+            incrementalDexesDir.mkdir();
+            incrementalDexesDir.setReadable(true, false);
+            incrementalDexesDir.setExecutable(true, false);
+            // Create a directory for isolated processes to create directories in.
+            isolatedDexesDir.mkdir();
+            isolatedDexesDir.setWritable(true, false);
+            isolatedDexesDir.setExecutable(true, false);
+
+            optimizedDir = incrementalDexesDir;
+        } else {
+            // There is a UID check of the directory in dalvik.system.DexFile():
+            // https://android.googlesource.com/platform/libcore/+/45e0260/dalvik/src/main/java/dalvik/system/DexFile.java#101
+            // Rather than have each isolated process run DexOpt though, we use
+            // symlinks within the directory to point at the browser process'
+            // optimized dex files.
+            optimizedDir = new File(isolatedDexesDir, "isolated-" + mProcessUid);
+            optimizedDir.mkdir();
+            // Always wipe it out and re-create for simplicity.
+            Log.i(TAG, "Creating dex file symlinks for isolated process");
+            for (File f : optimizedDir.listFiles()) {
+                f.delete();
+            }
+            for (File f : incrementalDexesDir.listFiles()) {
+                String to = "../../" + incrementalDexesDir.getName() + "/" + f.getName();
+                File from = new File(optimizedDir, f.getName());
+                createSymlink(to, from);
+            }
+        }
+
+        Log.i(TAG, "Code cache dir: " + optimizedDir);
+        // TODO(agrieve): Might need to record classpath ordering if we ever have duplicate
+        //     class names (since then order will matter here).
+        Log.i(TAG, "Loading " + dexFilesArr.length + " dex files");
+
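+        // The reflection below is roughly what constructing a new
+        // BaseDexClassLoader with the extra dex files on its classpath would
+        // do, except that it mutates the existing loader's "pathList" in
+        // place, so classes already loaded by this ClassLoader remain valid.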
+        Object dexPathList = Reflect.getField(mClassLoader, "pathList");
+        Object[] dexElements = (Object[]) Reflect.getField(dexPathList, "dexElements");
+        dexElements = addDexElements(dexFilesArr, optimizedDir, dexElements);
+        Reflect.setField(dexPathList, "dexElements", dexElements);
+    }
+
+    /**
+     * Sets up all libraries within |libDir| to be loadable by System.loadLibrary().
+     */
+    void importNativeLibs(File libDir) throws ReflectiveOperationException, IOException {
+        Log.i(TAG, "Importing native libraries from: " + libDir);
+        if (!libDir.exists()) {
+            Log.i(TAG, "No native libs exist.");
+            return;
+        }
+        // The library copying is not necessary on older devices, but we do it anyway to
+        // simplify things (it's fast compared to dexing).
+        // https://code.google.com/p/android/issues/detail?id=79480
+        File localLibsDir = new File(mAppFilesSubDir, "lib");
+        File copyLibsLockFile = new File(mAppFilesSubDir, "libcopy.lock");
+        if (mIsPrimaryProcess) {
+            // Primary process: Copies native libraries into the app's data directory.
+            ensureAppFilesSubDirExists();
+            LockFile lockFile = LockFile.acquireRuntimeLock(copyLibsLockFile);
+            if (lockFile == null) {
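+                // Another (non-isolated) process holds the lock; wait for its
+                // copy to finish rather than racing it.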
+                LockFile.waitForRuntimeLock(copyLibsLockFile, 10 * 1000);
+            } else {
+                try {
+                    localLibsDir.mkdir();
+                    localLibsDir.setReadable(true, false);
+                    localLibsDir.setExecutable(true, false);
+                    copyChangedFiles(libDir, localLibsDir);
+                } finally {
+                    lockFile.release();
+                }
+            }
+        } else {
+            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
+                // TODO: Work around this issue by using APK splits to install each dex / lib.
+                throw new RuntimeException("Incremental install does not work on Android M+ "
+                        + "with isolated processes. Use the gn arg:\n"
+                        + "    disable_incremental_isolated_processes=true\n"
+                        + "and try again.");
+            }
+            // Other processes: Waits for primary process to finish copying.
+            LockFile.waitForRuntimeLock(copyLibsLockFile, 10 * 1000);
+        }
+        addNativeLibrarySearchPath(localLibsDir);
+    }
+
+    @SuppressWarnings("unchecked")
+    private void addNativeLibrarySearchPath(File nativeLibDir) throws ReflectiveOperationException {
+        Object dexPathList = Reflect.getField(mClassLoader, "pathList");
+        Object currentDirs = Reflect.getField(dexPathList, "nativeLibraryDirectories");
+        File[] newDirs = new File[] { nativeLibDir };
+        // Switched from an array to an ArrayList in Lollipop.
+        if (currentDirs instanceof List) {
+            List<File> dirsAsList = (List<File>) currentDirs;
+            dirsAsList.add(0, nativeLibDir);
+        } else {
+            File[] dirsAsArray = (File[]) currentDirs;
+            Reflect.setField(dexPathList, "nativeLibraryDirectories",
+                    Reflect.concatArrays(newDirs, newDirs, dirsAsArray));
+        }
+
+        Object[] nativeLibraryPathElements;
+        try {
+            nativeLibraryPathElements =
+                    (Object[]) Reflect.getField(dexPathList, "nativeLibraryPathElements");
+        } catch (NoSuchFieldException e) {
+            // This field doesn't exist pre-M.
+            return;
+        }
+        Object[] additionalElements = makeNativePathElements(newDirs);
+        Reflect.setField(dexPathList, "nativeLibraryPathElements",
+                Reflect.concatArrays(nativeLibraryPathElements, additionalElements,
+                        nativeLibraryPathElements));
+    }
+
+    private static void copyChangedFiles(File srcDir, File dstDir) throws IOException {
+        // No need to delete stale libs since libraries are loaded explicitly.
+        int numNotChanged = 0;
+        for (File f : srcDir.listFiles()) {
+            // Note: Tried using hardlinks, but resulted in EACCES exceptions.
+            File dest = new File(dstDir, f.getName());
+            if (!copyIfModified(f, dest)) {
+                numNotChanged++;
+            }
+        }
+        if (numNotChanged > 0) {
+            Log.i(TAG, numNotChanged + " libs already up-to-date.");
+        }
+    }
+
+    private static boolean copyIfModified(File src, File dest) throws IOException {
+        long lastModified = src.lastModified();
+        if (dest.exists() && dest.lastModified() == lastModified) {
+            return false;
+        }
+        Log.i(TAG, "Copying " + src + " -> " + dest);
+        FileInputStream istream = new FileInputStream(src);
+        FileOutputStream ostream = new FileOutputStream(dest);
+        ostream.getChannel().transferFrom(istream.getChannel(), 0, istream.getChannel().size());
+        istream.close();
+        ostream.close();
+        dest.setReadable(true, false);
+        dest.setExecutable(true, false);
+        dest.setLastModified(lastModified);
+        return true;
+    }
+
+    private void ensureAppFilesSubDirExists() {
+        mAppFilesSubDir.mkdir();
+        mAppFilesSubDir.setExecutable(true, false);
+    }
+
+    private void createSymlink(String to, File from) throws ReflectiveOperationException {
+        Reflect.invokeMethod(mLibcoreOs, "symlink", to, from.getAbsolutePath());
+    }
+
+    private static Object[] makeNativePathElements(File[] paths)
+            throws ReflectiveOperationException {
+        Class<?> entryClazz = Class.forName("dalvik.system.DexPathList$Element");
+        Object[] entries = new Object[paths.length];
+        for (int i = 0; i < paths.length; ++i) {
+            entries[i] = Reflect.newInstance(entryClazz, paths[i], true, null, null);
+        }
+        return entries;
+    }
+
+    private Object[] addDexElements(File[] files, File optimizedDirectory, Object[] curDexElements)
+            throws ReflectiveOperationException {
+        Class<?> entryClazz = Class.forName("dalvik.system.DexPathList$Element");
+        Class<?> clazz = Class.forName("dalvik.system.DexPathList");
+        Object[] ret =
+                Reflect.concatArrays(curDexElements, curDexElements, new Object[files.length]);
+        File emptyDir = new File("");
+        for (int i = 0; i < files.length; ++i) {
+            File file = files[i];
+            Object dexFile;
+            if ("N".equals(Build.VERSION.CODENAME)) {
+                // loadDexFile requires that ret contain all previously added elements.
+                dexFile = Reflect.invokeMethod(clazz, "loadDexFile", file, optimizedDirectory,
+                                               mClassLoader, ret);
+            } else {
+                dexFile = Reflect.invokeMethod(clazz, "loadDexFile", file, optimizedDirectory);
+            }
+            ret[curDexElements.length + i] =
+                    Reflect.newInstance(entryClazz, emptyDir, false, file, dexFile);
+        }
+        return ret;
+    }
+}
diff --git a/build/android/incremental_install/java/org/chromium/incrementalinstall/LockFile.java b/build/android/incremental_install/java/org/chromium/incrementalinstall/LockFile.java
new file mode 100644
index 0000000..6e48f3b
--- /dev/null
+++ b/build/android/incremental_install/java/org/chromium/incrementalinstall/LockFile.java
@@ -0,0 +1,129 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.incrementalinstall;
+
+import android.util.Log;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.channels.FileLock;
+import java.util.concurrent.Callable;
+
+/**
+ * Helpers for dealing with .lock files used during install / first run.
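+ *
+ * Two flavors are used: "installer" locks signal state via file length
+ * (files under /data/local/tmp cannot be deleted on Android M+), while
+ * "runtime" locks use a java.nio FileLock and are deleted on release.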
+ */
+final class LockFile {
+    private static final String TAG = "cr.incrementalinstall";
+
+    private final File mFile;
+    private final FileOutputStream mOutputStream;
+    private final FileLock mFileLock;
+
+    private LockFile(File file, FileOutputStream outputStream, FileLock fileLock) {
+        mFile = file;
+        mOutputStream = outputStream;
+        mFileLock = fileLock;
+    }
+
+    /**
+     * Clears the lock file by writing to it (making it non-zero in length).
+     */
+    static void clearInstallerLock(File lockFile) throws IOException {
+        Log.i(TAG, "Clearing " + lockFile);
+        // On Android M+, we can't delete files in /data/local/tmp, so we write to it instead.
+        FileOutputStream os = new FileOutputStream(lockFile);
+        os.write(1);
+        os.close();
+    }
+
+    /**
+     * Waits for the given file to be non-zero in length.
+     */
+    static void waitForInstallerLock(final File file, long timeoutMs) {
+        pollingWait(new Callable<Boolean>() {
+            @Override public Boolean call() {
+                return !installerLockExists(file);
+            }
+        }, file, timeoutMs);
+    }
+
+    /**
+     * Polls |func| until it returns true, or fails once |timeoutMs| elapses.
+     */
+    private static void pollingWait(Callable<Boolean> func, File file, long timeoutMs) {
+        long pollIntervalMs = 200;
+        for (int i = 0; i < timeoutMs / pollIntervalMs; i++) {
+            try {
+                if (func.call()) {
+                    if (i > 0) {
+                        Log.i(TAG, "Finished waiting on lock file: " + file);
+                    }
+                    return;
+                } else if (i == 0) {
+                    Log.i(TAG, "Waiting on lock file: " + file);
+                }
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+            try {
+                Thread.sleep(pollIntervalMs);
+            } catch (InterruptedException e) {
+                // Should never happen.
+            }
+        }
+        throw new RuntimeException("Timed out waiting for lock file: " + file);
+    }
+
+    /**
+     * Returns whether the given lock file is missing or is in the locked state.
+     */
+    static boolean installerLockExists(File file) {
+        return !file.exists() || file.length() == 0;
+    }
+
+    /**
+     * Attempts to acquire a lock for the given file.
+     * @return The LockFile if the lock was acquired, or null otherwise.
+     */
+    static LockFile acquireRuntimeLock(File file) {
+        try {
+            FileOutputStream outputStream = new FileOutputStream(file);
+            FileLock lock = outputStream.getChannel().tryLock();
+            if (lock != null) {
+                Log.i(TAG, "Created lock file: " + file);
+                return new LockFile(file, outputStream, lock);
+            }
+            outputStream.close();
+        } catch (IOException e) {
+            // Do nothing. We didn't get the lock.
+            Log.w(TAG, "Exception trying to acquire lock " + file, e);
+        }
+        return null;
+    }
+
+    /**
+     * Waits for the given file to not exist.
+     */
+    static void waitForRuntimeLock(final File file, long timeoutMs) {
+        pollingWait(new Callable<Boolean>() {
+            @Override public Boolean call() {
+                return !file.exists();
+            }
+        }, file, timeoutMs);
+    }
+
+    /**
+     * Releases and deletes the lock file.
+     */
+    void release() throws IOException {
+        Log.i(TAG, "Deleting lock file: " + mFile);
+        mFileLock.release();
+        mOutputStream.close();
+        if (!mFile.delete()) {
+            throw new IOException("Failed to delete lock file: " + mFile);
+        }
+    }
+}
diff --git a/build/android/incremental_install/java/org/chromium/incrementalinstall/Reflect.java b/build/android/incremental_install/java/org/chromium/incrementalinstall/Reflect.java
new file mode 100644
index 0000000..c64dc1e
--- /dev/null
+++ b/build/android/incremental_install/java/org/chromium/incrementalinstall/Reflect.java
@@ -0,0 +1,142 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.incrementalinstall;
+
+import java.lang.reflect.Array;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+
+/**
+ * Reflection helper methods.
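+ *
+ * Usage sketch (field name hypothetical):
+ *   Object providers = Reflect.getField(someObject, "mProviders");
+ * Lookups walk up the class hierarchy and call setAccessible(), so
+ * non-public members are reachable.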
+ */
+final class Reflect {
+    /**
+     * Sets the value of an object's field (even if it's not visible).
+     *
+     * @param instance The object containing the field to set.
+     * @param name The name of the field to set.
+     * @param value The new value for the field.
+     */
+    static void setField(Object instance, String name, Object value)
+            throws ReflectiveOperationException {
+        Field field = findField(instance, name);
+        field.setAccessible(true);
+        field.set(instance, value);
+    }
+
+    /**
+     * Retrieves the value of an object's field (even if it's not visible).
+     *
+     * @param instance The object containing the field to read.
+     * @param name The name of the field to read.
+     * @return The field's value. Primitive values are returned as their boxed
+     *         type.
+     */
+    static Object getField(Object instance, String name) throws ReflectiveOperationException {
+        Field field = findField(instance, name);
+        field.setAccessible(true);
+        return field.get(instance);
+    }
+
+    /**
+     * Concatenates two arrays into a new array. The arrays must be of the same
+     * type; |arrType| supplies the component type of the result.
+     */
+    static Object[] concatArrays(Object[] arrType, Object[] left, Object[] right) {
+        Object[] result = (Object[]) Array.newInstance(
+                arrType.getClass().getComponentType(), left.length + right.length);
+        System.arraycopy(left, 0, result, 0, left.length);
+        System.arraycopy(right, 0, result, left.length, right.length);
+        return result;
+    }
+
+    /**
+     * Invokes a method with zero or more parameters. For static methods, use the Class as the
+     * instance.
+     */
+    static Object invokeMethod(Object instance, String name, Object... params)
+            throws ReflectiveOperationException {
+        boolean isStatic = instance instanceof Class;
+        Class<?> clazz = isStatic ? (Class<?>) instance : instance.getClass();
+        Method method = findMethod(clazz, name, params);
+        method.setAccessible(true);
+        return method.invoke(instance, params);
+    }
+
+    /**
+     * Calls a constructor with zero or more parameters.
+     */
+    static Object newInstance(Class<?> clazz, Object... params)
+            throws ReflectiveOperationException {
+        Constructor<?> constructor = findConstructor(clazz, params);
+        constructor.setAccessible(true);
+        return constructor.newInstance(params);
+    }
+
+    private static Field findField(Object instance, String name) throws NoSuchFieldException {
+        boolean isStatic = instance instanceof Class;
+        Class<?> start = isStatic ? (Class<?>) instance : instance.getClass();
+        for (Class<?> clazz = start; clazz != null; clazz = clazz.getSuperclass()) {
+            try {
+                return clazz.getDeclaredField(name);
+            } catch (NoSuchFieldException e) {
+                // Need to look in the superclass.
+            }
+        }
+        throw new NoSuchFieldException("Field " + name + " not found in " + start);
+    }
+
+    private static Method findMethod(Class<?> clazz, String name, Object... params)
+            throws NoSuchMethodException {
+        // Use a cursor so the original class can be reported if no match is found.
+        for (Class<?> c = clazz; c != null; c = c.getSuperclass()) {
+            for (Method method : c.getDeclaredMethods()) {
+                if (method.getName().equals(name)
+                        && areParametersCompatible(method.getParameterTypes(), params)) {
+                    return method;
+                }
+            }
+        }
+        throw new NoSuchMethodException("Method " + name + " with parameters "
+                + Arrays.asList(params) + " not found in " + clazz);
+    }
+
+    private static Constructor<?> findConstructor(Class<?> clazz, Object... params)
+            throws NoSuchMethodException {
+        for (Constructor<?> constructor : clazz.getDeclaredConstructors()) {
+            if (areParametersCompatible(constructor.getParameterTypes(), params)) {
+                return constructor;
+            }
+        }
+        throw new NoSuchMethodException("Constructor with parameters " + Arrays.asList(params)
+                + " not found in " + clazz);
+    }
+
+    private static boolean areParametersCompatible(Class<?>[] paramTypes, Object... params) {
+        if (params.length != paramTypes.length) {
+            return false;
+        }
+        for (int i = 0; i < params.length; i++) {
+            if (!isAssignableFrom(paramTypes[i], params[i])) {
+                return false;
+            }
+        }
+        return true;
+    }
+
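+    /**
+     * Like Class#isAssignableFrom(), but treats primitive parameter types as
+     * matching their wrapper classes, since reflection supplies arguments in
+     * boxed form.
+     */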
+    private static boolean isAssignableFrom(Class<?> left, Object right) {
+        if (right == null) {
+            return !left.isPrimitive();
+        }
+        Class<?> rightClazz = right.getClass();
+        if (left.isPrimitive()) {
+            // TODO(agrieve): Fill in the rest as needed.
+            return left == boolean.class && rightClazz == Boolean.class
+                   || left == int.class && rightClazz == Integer.class;
+        }
+        return left.isAssignableFrom(rightClazz);
+    }
+}
diff --git a/build/android/insert_chromium_version.gypi b/build/android/insert_chromium_version.gypi
new file mode 100644
index 0000000..a6ff908
--- /dev/null
+++ b/build/android/insert_chromium_version.gypi
@@ -0,0 +1,53 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide a rule that
+# inserts a chromium version string into native libraries.
+#
+# To use this, create a gyp target with the following form:
+#  {
+#    'actions': [
+#      {
+#        'action_name': 'insert_chromium_version',
+#        'variables': {
+#          'ordered_libraries_file': 'file generated by write_ordered_libraries',
+#          'stripped_libraries_dir': 'the directory containing the native libraries',
+#          'input_paths': 'files to be added to the list of inputs',
+#          'stamp': 'file to touch when the action is complete',
+#          'version_string': 'chromium version string to be inserted',
+#        },
+#        'includes': [ '../../build/android/insert_chromium_version.gypi' ],
+#      },
+#    ],
+#  },
+#
+
+{
+  'message': 'Inserting chromium version string into native libraries',
+  'variables': {
+    'input_paths': [],
+  },
+  'inputs': [
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(DEPTH)/build/android/gyp/insert_chromium_version.py',
+    '<(ordered_libraries_file)',
+    '>@(input_paths)',
+  ],
+  'outputs': [
+    '<(stamp)',
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/insert_chromium_version.py',
+    '--android-objcopy=<(android_objcopy)',
+    '--stripped-libraries-dir=<(stripped_libraries_dir)',
+    '--libraries=@FileArg(<(ordered_libraries_file):libraries)',
+    '--version-string=<(version_string)',
+    '--stamp=<(stamp)',
+  ],
+  'conditions': [
+    ['component == "shared_library"', {
+      # Add a fake output to force the build to always re-run this step. This
+      # is required because the real inputs are not known at gyp-time and
+      # changing base.so may not trigger changes to dependent libraries.
+      'outputs': [ '<(stamp).fake' ]
+    }],
+  ],
+}
diff --git a/build/android/install_emulator_deps.py b/build/android/install_emulator_deps.py
new file mode 100755
index 0000000..acd2093
--- /dev/null
+++ b/build/android/install_emulator_deps.py
@@ -0,0 +1,318 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Installs deps for using SDK emulator for testing.
+
+The script will download the SDK and system images, if they are not present, and
+install and enable KVM, if virtualization has been enabled in the BIOS.
+"""
+
+
+import logging
+import optparse
+import os
+import re
+import sys
+
+import devil_chromium
+from devil.utils import cmd_helper
+from devil.utils import run_tests_helper
+from pylib import constants
+from pylib import pexpect
+
+# Android API level
+DEFAULT_ANDROID_API_LEVEL = constants.ANDROID_SDK_VERSION
+# Android ABI/Arch
+DEFAULT_ABI = 'x86'
+
+# Default timeouts (in seconds) for downloading SDK components
+DOWNLOAD_SYSTEM_IMAGE_TIMEOUT = 30
+DOWNLOAD_SDK_PLATFORM_TIMEOUT = 60
+
+def CheckSDK():
+  """Check if SDK is already installed.
+
+  Returns:
+    True if the emulator SDK directory (src/android_emulator_sdk/) exists.
+  """
+  return os.path.exists(constants.ANDROID_SDK_ROOT)
+
+
+def CheckSDKPlatform(api_level=DEFAULT_ANDROID_API_LEVEL, google=False):
+  """Check if the "SDK Platform" for the specified API level is installed.
+     This is necessary in order for the emulator to run when the target
+     is specified.
+
+  Args:
+    api_level: the Android API level to check; defaults to the latest API.
+    google: check for the Google APIs platform instead of the AOSP platform.
+
+  Returns:
+    True if the platform is already installed.
+  """
+  android_binary = os.path.join(constants.ANDROID_SDK_ROOT, 'tools', 'android')
+  if google:
+    pattern = re.compile('id: [0-9]+ or "Google Inc.:Google APIs:%s"' %
+                         api_level)
+  else:
+    pattern = re.compile('id: [0-9]+ or "android-%d"' % api_level)
+
+  try:
+    exit_code, stdout = cmd_helper.GetCmdStatusAndOutput(
+        [android_binary, 'list'])
+    if exit_code != 0:
+      raise Exception('\'android list\' command failed')
+    for line in stdout.split('\n'):
+      if pattern.match(line):
+        return True
+    return False
+  except OSError:
+    logging.exception('Unable to execute \'android list\'')
+    return False
+
+
+def CheckSystemImage(abi, api_level=DEFAULT_ANDROID_API_LEVEL, google=False):
+  """Check if Android system images have been installed.
+
+  Args:
+    abi: target abi, x86 or arm
+    api_level: the Android API level to check for; defaults to the latest API.
+    google: use Google build system image instead of AOSP build
+
+  Returns:
+    True if the system image for |abi| has been previously downloaded.
+  """
+  api_target = 'android-%d' % api_level
+  system_image_root = os.path.join(constants.ANDROID_SDK_ROOT,
+                                   'system-images', api_target)
+  if abi == 'x86':
+    if google:
+      return os.path.exists(os.path.join(system_image_root, 'google_apis',
+                                         'x86'))
+    else:
+      return os.path.exists(os.path.join(system_image_root, 'default', 'x86'))
+  elif abi == 'arm':
+    if google:
+      return os.path.exists(os.path.join(system_image_root, 'google_apis',
+                                         'armeabi-v7a'))
+    else:
+      return os.path.exists(os.path.join(system_image_root, 'default',
+                                         'armeabi-v7a'))
+  else:
+    raise Exception("abi option invalid")
+
+def CheckKVM():
+  """Quickly check whether KVM is enabled.
+
+  Returns:
+    True iff /dev/kvm exists (Linux only).
+  """
+  return os.path.exists('/dev/kvm')
+
+def RunKvmOk():
+  """Run kvm-ok as root to check that KVM is properly enabled after installation
+     of the required packages.
+
+  Returns:
+    True iff KVM is enabled (/dev/kvm exists). On failure, returns False,
+    but kvm-ok also prints detailed information explaining why KVM isn't
+    enabled (e.g. the CPU doesn't support it, or the BIOS disabled it).
+  """
+  try:
+    # Note: kvm-ok is in /usr/sbin, so always use 'sudo' to run it.
+    return not cmd_helper.RunCmd(['sudo', 'kvm-ok'])
+  except OSError:
+    logging.info('kvm-ok not installed')
+    return False
+
+
+def InstallKVM():
+  """Installs KVM packages."""
+  rc = cmd_helper.RunCmd(['sudo', 'apt-get', 'install', 'kvm'])
+  if rc:
+    logging.critical('ERROR: Did not install KVM. Make sure hardware '
+                     'virtualization is enabled in BIOS (i.e. Intel VT-x or '
+                     'AMD SVM).')
+  # TODO(navabi): Use modprobe kvm-amd on AMD processors.
+  rc = cmd_helper.RunCmd(['sudo', 'modprobe', 'kvm-intel'])
+  if rc:
+    logging.critical('ERROR: Did not add KVM module to Linux Kernel. Make sure '
+                     'hardware virtualization is enabled in BIOS.')
+  # Now check to ensure KVM acceleration can be used.
+  if not RunKvmOk():
+    logging.critical('ERROR: Can not use KVM acceleration. Make sure hardware '
+                     'virtualization is enabled in BIOS (i.e. Intel VT-x or '
+                     'AMD SVM).')
+
+
+def UpdateSDK(api_level, package_name, package_pattern, timeout):
+  """This function update SDK with a filter index.
+
+  Args:
+    api_level: the Android API level to download for.
+    package_name: logging name of package that is being updated.
+    package_pattern: the pattern to match the filter index from.
+    timeout: the amount of time (in seconds) to wait for the update command.
+  """
+  android_binary = os.path.join(constants.ANDROID_SDK_ROOT, 'tools', 'android')
+
+  list_sdk_repo_command = [android_binary, 'list', 'sdk', '--all']
+
+  exit_code, stdout = cmd_helper.GetCmdStatusAndOutput(list_sdk_repo_command)
+
+  if exit_code != 0:
+    raise Exception('\'android list sdk --all\' command returned %d' % exit_code)
+
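+  # 'android list sdk --all' prints one numbered entry per package; the index
+  # captured by |package_pattern| is what 'android update sdk --filter'
+  # consumes below.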
+  for line in stdout.split('\n'):
+    match = package_pattern.match(line)
+    if match:
+      index = match.group(1)
+      logging.info('package %s corresponds to %s with api level %d',
+                   index, package_name, api_level)
+      update_command = [android_binary, 'update', 'sdk', '--no-ui', '--all',
+                        '--filter', index]
+      update_command_str = ' '.join(update_command)
+      logging.info('running update command: %s', update_command_str)
+      update_process = pexpect.spawn(update_command_str)
+
+      if update_process.expect('Do you accept the license') != 0:
+        raise Exception('License agreement check failed')
+      update_process.sendline('y')
+      if update_process.expect(
+        'Done. 1 package installed.', timeout=timeout) == 0:
+        logging.info('Successfully installed %s for API level %d',
+                      package_name, api_level)
+        return
+      else:
+        raise Exception('Failed to install platform update')
+  raise Exception('Could not find android-%d update for the SDK!' % api_level)
+
+def GetSystemImage(abi, api_level=DEFAULT_ANDROID_API_LEVEL, google=False):
+  """Download system image files
+
+  Args:
+    abi: target abi, x86 or arm
+    api_level: the Android API level to download for.
+    google: use Google build system image instead of AOSP build
+  """
+  logging.info('Downloading the %s system image into the SDK directory.', abi)
+
+  if abi == 'x86':
+    if google:
+      package_name = 'Google Intel x86 Atom System Image'
+      pattern = re.compile(
+         r'\s*([0-9]+)- Google APIs Intel x86 Atom System Image, Google Inc.'
+        ' API %d.*' % api_level)
+    else:
+      package_name = 'Intel x86 system image'
+      pattern = re.compile(
+        r'\s*([0-9]+)- Intel x86 Atom System Image, Android API %d.*'
+        % api_level)
+  elif abi == 'arm':
+    if google:
+      package_name = 'Google arm system image'
+      pattern = re.compile(
+        r'\s*([0-9]+)- Google APIs ARM EABI v7a System Image, Google Inc. API '
+        '%d.*' % api_level)
+    else:
+      package_name = 'Android arm system image'
+      pattern = re.compile(
+        r'\s*([0-9]+)- ARM EABI v7a System Image, Android API %d.*' % api_level)
+  else:
+    raise Exception('abi option is invalid')
+
+  UpdateSDK(api_level, package_name, pattern, DOWNLOAD_SYSTEM_IMAGE_TIMEOUT)
+
+def GetSDKPlatform(api_level=DEFAULT_ANDROID_API_LEVEL, google=False):
+  """Update the SDK to include the platform specified.
+
+  Args:
+    api_level: the Android API level to download
+    google: use Google build system image instead of AOSP build
+  """
+  logging.info('Downloading the SDK Platform into the SDK directory.')
+
+  platform_package_pattern = re.compile(
+      r'\s*([0-9]+)- SDK Platform Android [\.,0-9]+, API %d.*' % api_level)
+
+  UpdateSDK(api_level, 'SDK Platform', platform_package_pattern,
+            DOWNLOAD_SDK_PLATFORM_TIMEOUT)
+
+  if google:
+    google_api_package_pattern = re.compile(
+      r'\s*([0-9]+)- Google APIs, Android API %d.*' % api_level)
+    UpdateSDK(api_level, 'Google APIs', google_api_package_pattern,
+              DOWNLOAD_SDK_PLATFORM_TIMEOUT)
+
+
+def main(argv):
+  opt_parser = optparse.OptionParser(
+      description='Install dependencies for running the Android emulator')
+  opt_parser.add_option('--abi',
+                        dest='abi',
+                        help='The targeted abi for emulator system image',
+                        type='string',
+                        default=DEFAULT_ABI)
+  opt_parser.add_option('--api-level',
+                        dest='api_level',
+                        help=('The API level (e.g., 19 for Android 4.4) to '
+                              'ensure is available'),
+                        type='int',
+                        default=DEFAULT_ANDROID_API_LEVEL)
+  opt_parser.add_option('-v',
+                        dest='verbosity',
+                        default=1,
+                        action='count',
+                        help='Verbose level (multiple times for more)')
+  opt_parser.add_option('--google',
+                        dest='google',
+                        action='store_true',
+                        default=False,
+                        help='Install Google System Image instead of AOSP')
+
+  options, _ = opt_parser.parse_args(argv[1:])
+
+  run_tests_helper.SetLogLevel(verbose_count=options.verbosity)
+
+  devil_chromium.Initialize()
+
+  # The calls below download the SDK platform and/or system images only if needed.
+  if CheckSDK():
+    logging.info('android_emulator_sdk/ exists')
+  else:
+    logging.critical('ERROR: Emulator SDK not installed in %s',
+                     constants.ANDROID_SDK_ROOT)
+    return 1
+
+  # Check target. The target has to be installed in order to run the emulator.
+  if CheckSDKPlatform(options.api_level, options.google):
+    logging.info('SDK platform %s %s android-%d already present, skipping.',
+                 'Google' if options.google else 'AOSP', options.abi,
+                 options.api_level)
+  else:
+    logging.info('SDK platform %s %s android-%d not present, installing.',
+                 'Google' if options.google else 'AOSP', options.abi,
+                 options.api_level)
+    GetSDKPlatform(options.api_level, options.google)
+
+  # Download the system image needed
+  if CheckSystemImage(options.abi, options.api_level, options.google):
+    logging.info('system image for %s %s android-%d already present, skipping.',
+                 'Google' if options.google else 'AOSP', options.abi,
+                 options.api_level)
+  else:
+    GetSystemImage(options.abi, options.api_level, options.google)
+
+  # Make sure KVM packages are installed and enabled.
+  if options.abi == 'x86':
+    if CheckKVM():
+      logging.info('KVM already installed and enabled.')
+    else:
+      logging.warning('KVM is not installed or enabled.')
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/build/android/java_cpp_enum.gypi b/build/android/java_cpp_enum.gypi
new file mode 100644
index 0000000..d4abafa
--- /dev/null
+++ b/build/android/java_cpp_enum.gypi
@@ -0,0 +1,64 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into a target to provide an action
+# to generate Java source files from a C++ header file containing annotated
+# enum definitions using a Python script.
+#
+# To use this, create a gyp target with the following form:
+#  {
+#    'target_name': 'bitmap_format_java',
+#    'type': 'none',
+#    'variables': {
+#      'source_file': 'ui/android/bitmap_format.h',
+#    },
+#    'includes': [ '../build/android/java_cpp_enum.gypi' ],
+#  },
+#
+# Then have the gyp target which compiles the java code depend on the newly
+# created target.
+
+{
+  'variables': {
+    # Location where all generated Java sources will be placed.
+    'output_dir': '<(SHARED_INTERMEDIATE_DIR)/enums/<(_target_name)',
+    'generator_path': '<(DEPTH)/build/android/gyp/java_cpp_enum.py',
+    'generator_args': '<(output_dir) <(source_file)',
+  },
+  'direct_dependent_settings': {
+    'variables': {
+      # Ensure that the output directory is used in the class path
+      # when building targets that depend on this one.
+      'generated_src_dirs': [
+        '<(output_dir)/',
+      ],
+      # Ensure that the targets depending on this one are rebuilt if the sources
+      # of this one are modified.
+      'additional_input_paths': [
+        '<(source_file)',
+      ],
+    },
+  },
+  'actions': [
+    {
+      'action_name': 'generate_java_constants',
+      'inputs': [
+        '<(DEPTH)/build/android/gyp/util/build_utils.py',
+        '<(generator_path)',
+        '<(source_file)',
+      ],
+      'outputs': [
+        # This is the main reason this is an action and not a rule. Gyp doesn't
+        # properly expand RULE_INPUT_PATH here and so it's impossible to
+        # calculate the list of outputs.
+        '<!@pymod_do_main(java_cpp_enum --print_output_only '
+            '<@(generator_args))',
+      ],
+      'action': [
+        'python', '<(generator_path)', '<@(generator_args)'
+      ],
+      'message': 'Generating Java from cpp header <(source_file)',
+    },
+  ],
+}
diff --git a/build/android/java_cpp_template.gypi b/build/android/java_cpp_template.gypi
new file mode 100644
index 0000000..3296659
--- /dev/null
+++ b/build/android/java_cpp_template.gypi
@@ -0,0 +1,81 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into a target to provide a rule
+# to generate Java source files from templates that are processed
+# through the host C pre-processor.
+#
+# NOTE: For generating Java counterparts to enums prefer using the java_cpp_enum
+#       rule instead.
+#
+# To use this, create a gyp target with the following form:
+#  {
+#    'target_name': 'android_net_java_constants',
+#    'type': 'none',
+#    'sources': [
+#      'net/android/NetError.template',
+#    ],
+#    'variables': {
+#      'package_name': 'org/chromium/net',
+#      'template_deps': ['base/net_error_list.h'],
+#    },
+#    'includes': [ '../build/android/java_cpp_template.gypi' ],
+#  },
+#
+# The 'sources' entry should list only the template file. The template file
+# itself should use the 'ClassName.template' naming format, and will generate
+# 'gen/templates/<target-name>/<package-name>/ClassName.java'. The files that
+# the template depends on (typically included by the template) should be
+# listed in the template_deps variable. Any change to them will force a
+# rebuild of the template, and hence of any source that depends on it.
+#
+
+{
+  # Location where all generated Java sources will be placed.
+  'variables': {
+    'include_path%': '<(DEPTH)',
+    'output_dir': '<(SHARED_INTERMEDIATE_DIR)/templates/<(_target_name)/<(package_name)',
+  },
+  'direct_dependent_settings': {
+    'variables': {
+      # Ensure that the output directory is used in the class path
+      # when building targets that depend on this one.
+      'generated_src_dirs': [
+        '<(output_dir)/',
+      ],
+      # Ensure dependents are rebuilt when sources for this rule change.
+      'additional_input_paths': [
+        '<@(_sources)',
+        '<@(template_deps)',
+      ],
+    },
+  },
+  # Define a single rule that will be applied to each .template file
+  # listed in 'sources'.
+  'rules': [
+    {
+      'rule_name': 'generate_java_constants',
+      'extension': 'template',
+      # Set template_deps as additional dependencies.
+      'variables': {
+        'output_path': '<(output_dir)/<(RULE_INPUT_ROOT).java',
+      },
+      'inputs': [
+        '<(DEPTH)/build/android/gyp/util/build_utils.py',
+        '<(DEPTH)/build/android/gyp/gcc_preprocess.py',
+        '<@(template_deps)'
+      ],
+      'outputs': [
+        '<(output_path)',
+      ],
+      'action': [
+        'python', '<(DEPTH)/build/android/gyp/gcc_preprocess.py',
+        '--include-path=<(include_path)',
+        '--output=<(output_path)',
+        '--template=<(RULE_INPUT_PATH)',
+      ],
+      'message': 'Generating Java from cpp template <(RULE_INPUT_PATH)',
+    }
+  ],
+}
diff --git a/build/android/java_google_api_keys.gyp b/build/android/java_google_api_keys.gyp
new file mode 100644
index 0000000..df046b6
--- /dev/null
+++ b/build/android/java_google_api_keys.gyp
@@ -0,0 +1,45 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file provides an action to generate Java source files from the Google
+# API keys using a Python script.
+
+{
+  'targets': [
+    {
+      'target_name': 'google_api_keys_java',
+      'type': 'none',
+      'variables': {
+        # Location where all generated Java sources will be placed.
+        'output_dir': '<(SHARED_INTERMEDIATE_DIR)/java_google_api_keys',
+        'generator_path': '<(DEPTH)/build/android/gyp/java_google_api_keys.py',
+        'output_file': '<(output_dir)/GoogleAPIKeys.java',
+      },
+      'direct_dependent_settings': {
+        'variables': {
+          # Ensure that the output directory is used in the class path
+          # when building targets that depend on this one.
+          'generated_src_dirs': [
+            '<(output_dir)/',
+          ],
+        },
+      },
+      'actions': [
+        {
+          'action_name': 'generate_java_google_api_keys',
+          'inputs': [
+            '<(generator_path)',
+          ],
+          'outputs': [
+            '<(output_file)',
+          ],
+          'action': [
+            'python', '<(generator_path)', '--out', '<(output_file)'
+          ],
+          'message': 'Generating Java from Google API Keys header',
+        },
+      ],
+    },
+  ],
+}
diff --git a/build/android/jinja_template.gypi b/build/android/jinja_template.gypi
new file mode 100644
index 0000000..7fcddd6
--- /dev/null
+++ b/build/android/jinja_template.gypi
@@ -0,0 +1,85 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into a target to process one or more
+# Jinja templates.
+#
+# To process a single template file, create a gyp target with the following
+# form:
+#  {
+#    'target_name': 'chrome_public_manifest',
+#    'type': 'none',
+#    'variables': {
+#      'jinja_inputs': ['android/java/AndroidManifest.xml'],
+#      'jinja_output': '<(SHARED_INTERMEDIATE_DIR)/chrome_public_manifest/AndroidManifest.xml',
+#      'jinja_variables': ['app_name=ChromePublic'],
+#    },
+#    'includes': [ '../build/android/jinja_template.gypi' ],
+#  },
+#
+# To process multiple template files and package the results into a zip file,
+# create a gyp target with the following form:
+#  {
+#    'target_name': 'chrome_template_resources',
+#    'type': 'none',
+#    'variables': {
+#       'jinja_inputs_base_dir': 'android/java/res_template',
+#       'jinja_inputs': [
+#         '<(jinja_inputs_base_dir)/xml/searchable.xml',
+#         '<(jinja_inputs_base_dir)/xml/syncadapter.xml',
+#       ],
+#       'jinja_outputs_zip': '<(PRODUCT_DIR)/res.java/<(_target_name).zip',
+#       'jinja_variables': ['app_name=ChromePublic'],
+#     },
+#     'includes': [ '../build/android/jinja_template.gypi' ],
+#   },
+#
+
+{
+  'actions': [
+    {
+      'action_name': '<(_target_name)_jinja_template',
+      'message': 'processing jinja template',
+      'variables': {
+        'jinja_output%': '',
+        'jinja_outputs_zip%': '',
+        'jinja_inputs_base_dir%': '',
+        'jinja_includes%': [],
+        'jinja_variables%': [],
+        'jinja_args': [],
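+        # |jinja_args| is appended to by the conditions below and expanded
+        # into the action via '<@(jinja_args)'.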
+      },
+      'inputs': [
+        '<(DEPTH)/build/android/gyp/util/build_utils.py',
+        '<(DEPTH)/build/android/gyp/jinja_template.py',
+        '<@(jinja_inputs)',
+        '<@(jinja_includes)',
+      ],
+      'conditions': [
+        ['jinja_output != ""', {
+          'outputs': [ '<(jinja_output)' ],
+          'variables': {
+            'jinja_args': ['--output', '<(jinja_output)'],
+          },
+        }],
+        ['jinja_outputs_zip != ""', {
+          'outputs': [ '<(jinja_outputs_zip)' ],
+          'variables': {
+            'jinja_args': ['--outputs-zip', '<(jinja_outputs_zip)'],
+          },
+        }],
+        ['jinja_inputs_base_dir != ""', {
+          'variables': {
+            'jinja_args': ['--inputs-base-dir', '<(jinja_inputs_base_dir)'],
+          },
+        }],
+      ],
+      'action': [
+        'python', '<(DEPTH)/build/android/gyp/jinja_template.py',
+        '--inputs', '<(jinja_inputs)',
+        '--variables', '<(jinja_variables)',
+        '<@(jinja_args)',
+      ],
+    },
+  ],
+}
diff --git a/build/android/lighttpd_server.py b/build/android/lighttpd_server.py
new file mode 100755
index 0000000..5c2dde8
--- /dev/null
+++ b/build/android/lighttpd_server.py
@@ -0,0 +1,256 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides a convenient wrapper for spawning a test lighttpd instance.
+
+Usage:
+  lighttpd_server PATH_TO_DOC_ROOT
+"""
+
+import codecs
+import contextlib
+import httplib
+import os
+import random
+import shutil
+import socket
+import subprocess
+import sys
+import tempfile
+import time
+
+from pylib import constants
+from pylib import pexpect
+
+class LighttpdServer(object):
+  """Wraps lighttpd server, providing robust startup.
+
+  Args:
+    document_root: Path to root of this server's hosted files.
+    port: TCP port on the _host_ machine that the server will listen on. If
+        omitted it will attempt to use 9000, or if unavailable it will find
+        a free port from 8001 - 8999.
+    lighttpd_path, lighttpd_module_path: Optional paths to lighttpd binaries.
+    base_config_path: If supplied this file will replace the built-in default
+        lighttpd config file.
+    extra_config_contents: If specified, this string will be appended to the
+        base config (default built-in, or from base_config_path).
+    config_path, error_log, access_log: Optional paths where the class should
+        place temporary files for this session.
+  """
+
+  def __init__(self, document_root, port=None,
+               lighttpd_path=None, lighttpd_module_path=None,
+               base_config_path=None, extra_config_contents=None,
+               config_path=None, error_log=None, access_log=None):
+    self.temp_dir = tempfile.mkdtemp(prefix='lighttpd_for_chrome_android')
+    self.document_root = os.path.abspath(document_root)
+    self.fixed_port = port
+    self.port = port or constants.LIGHTTPD_DEFAULT_PORT
+    self.server_tag = 'LightTPD ' + str(random.randint(111111, 999999))
+    self.lighttpd_path = lighttpd_path or '/usr/sbin/lighttpd'
+    self.lighttpd_module_path = lighttpd_module_path or '/usr/lib/lighttpd'
+    self.base_config_path = base_config_path
+    self.extra_config_contents = extra_config_contents
+    self.config_path = config_path or self._Mktmp('config')
+    self.error_log = error_log or self._Mktmp('error_log')
+    self.access_log = access_log or self._Mktmp('access_log')
+    self.pid_file = self._Mktmp('pid_file')
+    self.process = None
+
+  def _Mktmp(self, name):
+    return os.path.join(self.temp_dir, name)
+
+  @staticmethod
+  def _GetRandomPort():
+    # The port range for test servers is defined in constants.py.
+    return random.randint(constants.LIGHTTPD_RANDOM_PORT_FIRST,
+                          constants.LIGHTTPD_RANDOM_PORT_LAST)
+
+  def StartupHttpServer(self):
+    """Starts up a http server with specified document root and port."""
+    # If we want a specific port, make sure no one else is listening on it.
+    if self.fixed_port:
+      self._KillProcessListeningOnPort(self.fixed_port)
+    while True:
+      if self.base_config_path:
+        # Read the config
+        with codecs.open(self.base_config_path, 'r', 'utf-8') as f:
+          config_contents = f.read()
+      else:
+        config_contents = self._GetDefaultBaseConfig()
+      if self.extra_config_contents:
+        config_contents += self.extra_config_contents
+      # Write out the config, filling in placeholders from the members of |self|
+      with codecs.open(self.config_path, 'w', 'utf-8') as f:
+        f.write(config_contents % self.__dict__)
+      if (not os.path.exists(self.lighttpd_path) or
+          not os.access(self.lighttpd_path, os.X_OK)):
+        raise EnvironmentError(
+            'Could not find lighttpd at %s.\n'
+            'It may need to be installed (e.g. sudo apt-get install lighttpd)'
+            % self.lighttpd_path)
+      self.process = pexpect.spawn(self.lighttpd_path,
+                                   ['-D', '-f', self.config_path,
+                                    '-m', self.lighttpd_module_path],
+                                   cwd=self.temp_dir)
+      client_error, server_error = self._TestServerConnection()
+      if not client_error:
+        assert int(open(self.pid_file, 'r').read()) == self.process.pid
+        break
+      self.process.close()
+
+      if self.fixed_port or 'in use' not in server_error:
+        print 'Client error:', client_error
+        print 'Server error:', server_error
+        return False
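+      # The port was in use and no fixed port was requested: retry on a
+      # random port from the configured range.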
+      self.port = self._GetRandomPort()
+    return True
+
+  def ShutdownHttpServer(self):
+    """Shuts down our lighttpd processes."""
+    if self.process:
+      self.process.terminate()
+    shutil.rmtree(self.temp_dir, ignore_errors=True)
+
+  def _TestServerConnection(self):
+    # Wait for server to start
+    server_msg = ''
+    for timeout in xrange(1, 5):
+      client_error = None
+      try:
+        with contextlib.closing(httplib.HTTPConnection(
+            '127.0.0.1', self.port, timeout=timeout)) as http:
+          http.set_debuglevel(timeout > 3)
+          http.request('HEAD', '/')
+          r = http.getresponse()
+          r.read()
+          if (r.status == 200 and r.reason == 'OK' and
+              r.getheader('Server') == self.server_tag):
+            return (None, server_msg)
+          client_error = ('Bad response: %s %s version %s\n  ' %
+                          (r.status, r.reason, r.version) +
+                          '\n  '.join([': '.join(h) for h in r.getheaders()]))
+      except (httplib.HTTPException, socket.error) as client_error:
+        pass  # Probably too quick connecting: try again
+      # Check for server startup error messages
+      ix = self.process.expect([pexpect.TIMEOUT, pexpect.EOF, '.+'],
+                               timeout=timeout)
+      if ix == 2:  # stdout spew from the server
+        server_msg += self.process.match.group(0) # pylint: disable=no-member
+      elif ix == 1:  # EOF -- server has quit so give up.
+        client_error = client_error or 'Server exited'
+        break
+    return (client_error or 'Timeout', server_msg)
+
+  @staticmethod
+  def _KillProcessListeningOnPort(port):
+    """Checks if there is a process listening on port number |port| and
+    terminates it if found.
+
+    Args:
+      port: Port number to check.
+    """
+    if subprocess.call(['fuser', '-kv', '%d/tcp' % port]) == 0:
+      # Give the process some time to terminate and check that it is gone.
+      time.sleep(2)
+      assert subprocess.call(['fuser', '-v', '%d/tcp' % port]) != 0, \
+          'Unable to kill process listening on port %d.' % port
+
+  @staticmethod
+  def _GetDefaultBaseConfig():
+    return """server.tag                  = "%(server_tag)s"
+server.modules              = ( "mod_access",
+                                "mod_accesslog",
+                                "mod_alias",
+                                "mod_cgi",
+                                "mod_rewrite" )
+
+# default document root required
+#server.document-root = "."
+
+# files to check for if .../ is requested
+index-file.names            = ( "index.php", "index.pl", "index.cgi",
+                                "index.html", "index.htm", "default.htm" )
+# mimetype mapping
+mimetype.assign             = (
+  ".gif"          =>      "image/gif",
+  ".jpg"          =>      "image/jpeg",
+  ".jpeg"         =>      "image/jpeg",
+  ".png"          =>      "image/png",
+  ".svg"          =>      "image/svg+xml",
+  ".css"          =>      "text/css",
+  ".html"         =>      "text/html",
+  ".htm"          =>      "text/html",
+  ".xhtml"        =>      "application/xhtml+xml",
+  ".xhtmlmp"      =>      "application/vnd.wap.xhtml+xml",
+  ".js"           =>      "application/x-javascript",
+  ".log"          =>      "text/plain",
+  ".conf"         =>      "text/plain",
+  ".text"         =>      "text/plain",
+  ".txt"          =>      "text/plain",
+  ".dtd"          =>      "text/xml",
+  ".xml"          =>      "text/xml",
+  ".manifest"     =>      "text/cache-manifest",
+ )
+
+# Use the "Content-Type" extended attribute to obtain mime type if possible
+mimetype.use-xattr          = "enable"
+
+##
+# which extensions should not be handled via static-file transfer
+#
+# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi
+static-file.exclude-extensions = ( ".php", ".pl", ".cgi" )
+
+server.bind = "127.0.0.1"
+server.port = %(port)s
+
+## virtual directory listings
+dir-listing.activate        = "enable"
+#dir-listing.encoding       = "iso-8859-2"
+#dir-listing.external-css   = "style/oldstyle.css"
+
+## enable debugging
+#debug.log-request-header   = "enable"
+#debug.log-response-header  = "enable"
+#debug.log-request-handling = "enable"
+#debug.log-file-not-found   = "enable"
+
+#### SSL engine
+#ssl.engine                 = "enable"
+#ssl.pemfile                = "server.pem"
+
+# Autogenerated test-specific config follows.
+
+cgi.assign = ( ".cgi"  => "/usr/bin/env",
+               ".pl"   => "/usr/bin/env",
+               ".asis" => "/bin/cat",
+               ".php"  => "/usr/bin/php-cgi" )
+
+server.errorlog = "%(error_log)s"
+accesslog.filename = "%(access_log)s"
+server.upload-dirs = ( "/tmp" )
+server.pid-file = "%(pid_file)s"
+server.document-root = "%(document_root)s"
+
+"""
+
+
+def main(argv):
+  server = LighttpdServer(*argv[1:])
+  try:
+    if server.StartupHttpServer():
+      raw_input('Server running at http://127.0.0.1:%s -'
+                ' press Enter to exit it.' % server.port)
+    else:
+      print 'Server exit code:', server.process.exitstatus
+  finally:
+    server.ShutdownHttpServer()
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/build/android/lint/suppress.py b/build/android/lint/suppress.py
new file mode 100755
index 0000000..3926d6d
--- /dev/null
+++ b/build/android/lint/suppress.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Add all generated lint_result.xml files to suppressions.xml"""
+
+# pylint: disable=no-member
+
+
+import collections
+import optparse
+import os
+import sys
+from xml.dom import minidom
+
+_BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..')
+sys.path.append(_BUILD_ANDROID_DIR)
+
+from pylib.constants import host_paths
+
+
+_THIS_FILE = os.path.abspath(__file__)
+_CONFIG_PATH = os.path.join(os.path.dirname(_THIS_FILE), 'suppressions.xml')
+_DOC = (
+    '\nSTOP! It looks like you want to suppress some lint errors:\n'
+    '- Have you tried identifying the offending patch?\n'
+    '  Ask the author for a fix and/or revert the patch.\n'
+    '- It is preferred to add suppressions in the code instead of\n'
+    '  sweeping it under the rug here. See:\n\n'
+    '    http://developer.android.com/tools/debugging/improving-w-lint.html\n'
+    '\n'
+    'Still reading?\n'
+    '- You can edit this file manually to suppress an issue\n'
+    '  globally if it is not applicable to the project.\n'
+    '- You can also automatically add issues found so far in the\n'
+    '  build process by running:\n\n'
+    '    ' + os.path.relpath(_THIS_FILE, host_paths.DIR_SOURCE_ROOT) + '\n\n'
+    '  which will generate this file (Comments are not preserved).\n'
+    '  Note: PRODUCT_DIR will be substituted at run-time with the actual\n'
+    '  directory path (e.g. out/Debug)\n'
+)
+
+
+_Issue = collections.namedtuple('Issue', ['severity', 'paths', 'regexps'])
+
+
+def _ParseConfigFile(config_path):
+  print 'Parsing %s' % config_path
+  issues_dict = {}
+  dom = minidom.parse(config_path)
+  for issue in dom.getElementsByTagName('issue'):
+    issue_id = issue.attributes['id'].value
+    severity = issue.getAttribute('severity')
+
+    path_elements = (
+        p.attributes.get('path')
+        for p in issue.getElementsByTagName('ignore'))
+    paths = set(p.value for p in path_elements if p)
+
+    regexp_elements = (
+        p.attributes.get('regexp')
+        for p in issue.getElementsByTagName('ignore'))
+    regexps = set(r.value for r in regexp_elements if r)
+
+    issues_dict[issue_id] = _Issue(severity, paths, regexps)
+  return issues_dict
+
+
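+# For reference, a lint result file is expected to look roughly like this
+# (hypothetical issue id and path):
+#
+#   <issues>
+#     <issue id="NewApi" severity="Error">
+#       <location file="path/to/Offender.java"/>
+#     </issue>
+#   </issues>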
+def _ParseAndMergeResultFile(result_path, issues_dict):
+  print 'Parsing and merging %s' % result_path
+  dom = minidom.parse(result_path)
+  for issue in dom.getElementsByTagName('issue'):
+    issue_id = issue.attributes['id'].value
+    severity = issue.attributes['severity'].value
+    path = issue.getElementsByTagName('location')[0].attributes['file'].value
+    if issue_id not in issues_dict:
+      issues_dict[issue_id] = _Issue(severity, set(), set())
+    issues_dict[issue_id].paths.add(path)
+
+
+def _WriteConfigFile(config_path, issues_dict):
+  new_dom = minidom.getDOMImplementation().createDocument(None, 'lint', None)
+  top_element = new_dom.documentElement
+  top_element.appendChild(new_dom.createComment(_DOC))
+  for issue_id, issue in sorted(issues_dict.iteritems(), key=lambda i: i[0]):
+    issue_element = new_dom.createElement('issue')
+    issue_element.attributes['id'] = issue_id
+    if issue.severity:
+      issue_element.attributes['severity'] = issue.severity
+    if issue.severity == 'ignore':
+      print 'Warning: [%s] is suppressed globally.' % issue_id
+    else:
+      for path in sorted(issue.paths):
+        ignore_element = new_dom.createElement('ignore')
+        ignore_element.attributes['path'] = path
+        issue_element.appendChild(ignore_element)
+      for regexp in sorted(issue.regexps):
+        ignore_element = new_dom.createElement('ignore')
+        ignore_element.attributes['regexp'] = regexp
+        issue_element.appendChild(ignore_element)
+    top_element.appendChild(issue_element)
+
+  with open(config_path, 'w') as f:
+    f.write(new_dom.toprettyxml(indent='  ', encoding='utf-8'))
+  print 'Updated %s' % config_path
+
+
+def _Suppress(config_path, result_path):
+  issues_dict = _ParseConfigFile(config_path)
+  _ParseAndMergeResultFile(result_path, issues_dict)
+  _WriteConfigFile(config_path, issues_dict)
+
+
+def main():
+  parser = optparse.OptionParser(usage='%prog RESULT-FILE')
+  _, args = parser.parse_args()
+
+  if len(args) != 1 or not os.path.exists(args[0]):
+    parser.error('Must provide RESULT-FILE')
+
+  _Suppress(_CONFIG_PATH, args[0])
+
+
+if __name__ == '__main__':
+  main()
diff --git a/build/android/lint/suppressions.xml b/build/android/lint/suppressions.xml
new file mode 100644
index 0000000..db42846
--- /dev/null
+++ b/build/android/lint/suppressions.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="utf-8"?>
+<lint>
+  <!--
+STOP! It looks like you want to suppress some lint errors:
+- Have you tried identifying the offending patch?
+  Ask the author for a fix and/or revert the patch.
+- It is preferred to add suppressions in the code instead of
+  sweeping it under the rug here. See:
+
+    http://developer.android.com/tools/debugging/improving-w-lint.html
+
+Still reading?
+- You can edit this file manually to suppress an issue
+  globally if it is not applicable to the project.
+- You can also automatically add issues found so far in the
+  build process by running:
+
+    build/android/lint/suppress.py
+
+  which will generate this file (Comments are not preserved).
+  Note: PRODUCT_DIR will be substituted at run-time with the actual
+  directory path (e.g. out/Debug)
+-->
+  <issue id="AllowBackup">
+    <ignore path="AndroidManifest.xml"/>
+  </issue>
+  <issue id="Assert" severity="ignore"/>
+  <issue id="CommitPrefEdits">
+    <ignore path="third_party/cacheinvalidation/src/java/com/google/ipc/invalidation/ticl/android2/channel/AndroidChannelPreferences.java"/>
+  </issue>
+  <issue id="DefaultLocale">
+    <ignore path="third_party/cacheinvalidation/src/java/com/google/ipc/invalidation/external/client/contrib/AndroidListenerState.java"/>
+  </issue>
+  <issue id="DrawAllocation">
+    <ignore path="content/public/android/java/src/org/chromium/content/browser/ContentViewRenderView.java"/>
+    <ignore path="content/public/android/java/src/org/chromium/content/browser/PopupZoomer.java"/>
+  </issue>
+  <issue id="ExportedContentProvider">
+    <ignore path="AndroidManifest.xml"/>
+  </issue>
+  <issue id="HandlerLeak">
+    <ignore path="remoting/android/java/src/org/chromium/chromoting/TapGestureDetector.java"/>
+  </issue>
+  <issue id="IconMissingDensityFolder">
+    <!-- see crbug.com/542435 -->
+    <ignore path="android_webview/apk/java/res" />
+  </issue>
+  <issue id="IconDensities">
+    <!-- crbug.com/457918 is tracking missing assets -->
+    <ignore path="components/web_contents_delegate_android/android/java/res/drawable-xxhdpi"/>
+    <ignore path="components/web_contents_delegate_android/android/java/res/drawable-xxxhdpi"/>
+    <ignore path="content/public/android/java/res/drawable-xxhdpi"/>
+    <ignore path="content/public/android/java/res/drawable-xxxhdpi"/>
+    <ignore path="chrome/android/java/res/drawable-xxhdpi"/>
+    <ignore path="chrome/android/java/res/drawable-xxxhdpi"/>
+    <ignore path="ui/android/java/res/drawable-xxhdpi"/>
+    <ignore path="ui/android/java/res/drawable-xxxhdpi"/>
+    <!-- The large assets below only include a few densities to reduce APK size. -->
+    <ignore regexp=".*: data_reduction_illustration.png, google_icon_sprite.png, physical_web_logo.png, physical_web_logo_anim1.png, physical_web_logo_anim2.png$"/>
+  </issue>
+  <issue id="IconDipSize">
+    <ignore regexp=".*google_icon_sprite.png.*"/>
+  </issue>
+  <issue id="IconLocation">
+    <!-- It is OK for content_shell_apk to have missing assets. -->
+    <ignore path="content/shell/android/java/res/"/>
+    <!-- Suppression for chrome/test/chromedriver/test/webview_shell/java/res/drawable/icon.png -->
+    <ignore path="res/drawable/icon.png"/>
+    <!-- TODO(lambroslambrou) remove this once crbug.com/502030 is fixed. -->
+    <ignore path="remoting/android/java/res"/>
+  </issue>
+  <issue id="InconsistentLayout" severity="ignore"/>
+  <issue id="InflateParams" severity="ignore"/>
+  <issue id="LongLogTag" severity="ignore"/>
+  <issue id="MissingApplicationIcon" severity="ignore"/>
+  <issue id="MissingPermission" severity="ignore"/>
+  <issue id="MissingRegistered" severity="ignore"/>
+  <issue id="MissingVersion">
+    <ignore path="AndroidManifest.xml"/>
+  </issue>
+  <issue id="InlinedApi" severity="ignore"/>
+  <issue id="NewApi">
+    <ignore regexp="Attribute `paddingStart` referenced here can result in a crash on some specific devices older than API 17"/>
+    <ignore path="org/chromium/base/AnimationFrameTimeHistogram$Recorder.class"/>
+    <ignore path="org/chromium/base/JavaHandlerThread.class"/>
+    <ignore path="org/chromium/base/SysUtils.class"/>
+    <ignore path="org/chromium/chrome/browser/TtsPlatformImpl.class"/>
+    <ignore path="org/chromium/chrome/browser/TtsPlatformImpl$*.class"/>
+    <ignore path="chrome/android/java/res/values-v17/styles.xml"/>
+  </issue>
+  <issue id="OldTargetApi">
+    <ignore path="AndroidManifest.xml"/>
+  </issue>
+  <issue id="Overdraw" severity="ignore"/>
+  <issue id="Recycle" severity="ignore"/>
+  <issue id="Registered" severity="ignore"/>
+  <issue id="RtlCompat" severity="ignore"/>
+  <issue id="RtlEnabled" severity="ignore"/>
+  <issue id="RtlSymmetry" severity="ignore"/>
+  <issue id="SdCardPath">
+    <ignore path="content/public/android/java/src/org/chromium/content/browser/MediaResourceGetter.java"/>
+  </issue>
+  <issue id="SetJavaScriptEnabled" severity="ignore"/>
+  <issue id="UnusedResources">
+    <!-- Used by chrome/android/java/AndroidManifest.xml -->
+    <ignore path="chrome/android/java/res/drawable/window_background.xml" />
+    <ignore path="chrome/android/java/res/xml/bookmark_widget_info.xml" />
+    <ignore path="chrome/android/java/res/xml/file_paths.xml" />
+
+    <ignore path="content/shell/android/shell_apk/res/layout/content_shell_activity.xml" />
+    <ignore path="content/shell/android/shell_apk/res/values/strings.xml" />
+  </issue>
+  <issue id="SignatureOrSystemPermissions" severity="ignore"/>
+  <issue id="UnusedAttribute" severity="ignore"/>
+  <issue id="ViewConstructor" severity="ignore"/>
+  <issue id="WrongCall" severity="ignore"/>
+  <issue id="UselessParent">
+    <ignore path="chrome/android/java/res/layout/data_reduction_promo_screen.xml" />
+  </issue>
+</lint>
diff --git a/build/android/lint_action.gypi b/build/android/lint_action.gypi
new file mode 100644
index 0000000..3826662
--- /dev/null
+++ b/build/android/lint_action.gypi
@@ -0,0 +1,51 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide a rule to
+# run lint on java/class files.
+
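+# The includer is expected to define config_path, result_path, src_dirs,
+# stamp_path and lint_jar_path, which the action below references. A
+# hypothetical use (all values are placeholders):
+#  {
+#    'variables': {
+#      'config_path': '<(PRODUCT_DIR)/my_target/lint_config.xml',
+#      'result_path': '<(PRODUCT_DIR)/my_target/lint_result.xml',
+#      'stamp_path': '<(PRODUCT_DIR)/my_target/lint.stamp',
+#      'src_dirs': ['java/src'],
+#      'lint_jar_path': '<(PRODUCT_DIR)/lib.java/my_target.jar',
+#    },
+#    'includes': ['../../build/android/lint_action.gypi'],
+#  },
+#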
+{
+  'action_name': 'lint_<(_target_name)',
+  'message': 'Linting <(_target_name)',
+  'variables': {
+    'conditions': [
+      ['chromium_code != 0 and android_lint != 0 and never_lint == 0', {
+        'additional_args': ['--enable'],
+      }, {
+        'additional_args': [],
+      }]
+    ],
+    'android_lint_cache_stamp': '<(PRODUCT_DIR)/android_lint_cache/android_lint_cache.stamp',
+    'android_manifest_path%': '<(DEPTH)/build/android/AndroidManifest.xml',
+    'resource_dir%': '<(DEPTH)/build/android/ant/empty/res',
+    'suppressions_file%': '<(DEPTH)/build/android/lint/suppressions.xml',
+    'platform_xml_path': '<(android_sdk_root)/platform-tools/api/api-versions.xml',
+  },
+  'inputs': [
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(DEPTH)/build/android/gyp/lint.py',
+    '<(android_lint_cache_stamp)',
+    '<(android_manifest_path)',
+    '<(lint_jar_path)',
+    '<(suppressions_file)',
+    '<(platform_xml_path)',
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/lint.py',
+    '--lint-path=<(android_sdk_root)/tools/lint',
+    '--config-path=<(suppressions_file)',
+    '--processed-config-path=<(config_path)',
+    '--cache-dir', '<(PRODUCT_DIR)/android_lint_cache',
+    '--platform-xml-path', '<(platform_xml_path)',
+    '--manifest-path=<(android_manifest_path)',
+    '--result-path=<(result_path)',
+    '--resource-dir=<(resource_dir)',
+    '--product-dir=<(PRODUCT_DIR)',
+    '--src-dirs=>(src_dirs)',
+    '--jar-path=<(lint_jar_path)',
+    '--can-fail-build',
+    '--stamp=<(stamp_path)',
+    '<@(additional_args)',
+  ],
+}
diff --git a/build/android/locale_pak_resources.gypi b/build/android/locale_pak_resources.gypi
new file mode 100644
index 0000000..020b831
--- /dev/null
+++ b/build/android/locale_pak_resources.gypi
@@ -0,0 +1,54 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Creates a resources.zip with locale.pak files placed into appropriate
+# resource configs (e.g. en-GB.pak -> res/raw-en/en_gb.pak). Also generates
+# a locale_paks TypedArray so that resource files can be enumerated at runtime.
+#
+# If this target is included in the deps of an android resources/library/apk,
+# the resources will be included with that target.
+#
+# Variables:
+#   locale_pak_files - List of .pak files to process.
+#     Names must be of the form "en.pak" or "en-US.pak".
+#   resources_zip_path - Path of the generated zip file. Optional; normally
+#     you don't need to set this variable.
+#
+# Example
+#  {
+#    'target_name': 'my_locale_resources',
+#    'type': 'none',
+#    'variables': {
+#      'locale_pak_files': ['path1/fr.pak'],
+#    },
+#    'includes': [ '../build/android/locale_pak_resources.gypi' ],
+#  },
+#
+{
+  'variables': {
+    'resources_zip_path%': '<(PRODUCT_DIR)/res.java/<(_target_name).zip',
+  },
+  'all_dependent_settings': {
+    'variables': {
+      'additional_locale_input_paths': ['<(resources_zip_path)'],
+      'dependencies_locale_zip_paths': ['<(resources_zip_path)'],
+    },
+  },
+  'actions': [{
+    'action_name': '<(_target_name)_locale_pak_resources',
+    'inputs': [
+      '<(DEPTH)/build/android/gyp/util/build_utils.py',
+      '<(DEPTH)/build/android/gyp/locale_pak_resources.py',
+      '<@(locale_pak_files)',
+    ],
+    'outputs': [
+      '<(resources_zip_path)',
+    ],
+    'action': [
+      'python', '<(DEPTH)/build/android/gyp/locale_pak_resources.py',
+      '--locale-paks', '<(locale_pak_files)',
+      '--resources-zip', '<(resources_zip_path)',
+    ],
+  }],
+}
diff --git a/build/android/main_dex_action.gypi b/build/android/main_dex_action.gypi
new file mode 100644
index 0000000..4076418
--- /dev/null
+++ b/build/android/main_dex_action.gypi
@@ -0,0 +1,46 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide a rule that
+# generates a list of classes that must be kept in the main dex file.
+#
+# To use this, create a gyp target with the following form:
+#  {
+#    'actions': [
+#      {
+#        'action_name': 'some name for the action',
+#        'variables': {
+#          'jar_paths': ['path to jar', ...],
+#          'output_path': 'output path',
+#          'multidex_configuration_path': 'path to multidex config file',
+#        },
+#        'includes': [ 'relative/path/to/main_dex_action.gypi' ],
+#      },
+#    ],
+#  },
+#
+
+{
+  'message': 'Generating main dex classes list for <(output_path)',
+  'variables': {
+    'jar_paths%': [],
+    'output_path%': '',
+    'main_dex_list_script': '<(DEPTH)/build/android/gyp/main_dex_list.py',
+    'main_dex_rules_path': '<(DEPTH)/build/android/main_dex_classes.flags',
+  },
+  'inputs': [
+    '<@(jar_paths)',
+    '<(main_dex_list_script)',
+    '<(main_dex_rules_path)',
+    '<(multidex_configuration_path)',
+  ],
+  'outputs': [
+    '<(output_path)',
+  ],
+  'action': [
+    'python', '<(main_dex_list_script)',
+    '--main-dex-list-path', '<(output_path)',
+    '--android-sdk-tools', '<(android_sdk_tools)',
+    '--main-dex-rules-path', '<(main_dex_rules_path)',
+    '--multidex-configuration-path', '<(multidex_configuration_path)',
+    '<@(jar_paths)',
+  ]
+}
diff --git a/build/android/main_dex_classes.flags b/build/android/main_dex_classes.flags
new file mode 100644
index 0000000..81152dc
--- /dev/null
+++ b/build/android/main_dex_classes.flags
@@ -0,0 +1,12 @@
+-keep @**.MainDex class * {
+  *;
+}
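+
+# For example, a hypothetical class annotated with a matching annotation
+# would be kept in the main dex by the rule above:
+#
+#   @MainDex
+#   public class NativeStartupHelper { ... }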
+
+-keepclasseswithmembers class * {
+  public static ** asInterface(android.os.IBinder);
+}
+
+# Required when code coverage is enabled.
+-keep class com.vladium.** {
+    *;
+}
diff --git a/build/android/method_count.py b/build/android/method_count.py
new file mode 100755
index 0000000..6569f0e
--- /dev/null
+++ b/build/android/method_count.py
@@ -0,0 +1,79 @@
+#! /usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import os
+import re
+import shutil
+import sys
+import tempfile
+import zipfile
+
+import devil_chromium
+from devil.android.sdk import dexdump
+from pylib.constants import host_paths
+
+sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
+                             'common'))
+import perf_tests_results_helper # pylint: disable=import-error
+
+
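+# Matches the per-dex summary line emitted by dexdump's file summary, e.g.:
+#   method_ids_size     : 65001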
+_METHOD_IDS_SIZE_RE = re.compile(r'^method_ids_size +: +(\d+)$')
+
+def ExtractIfZip(dexfile, tmpdir):
+  if not os.path.splitext(dexfile)[1] in ('.zip', '.apk', '.jar'):
+    return [dexfile]
+
+  with zipfile.ZipFile(dexfile, 'r') as z:
+    dex_files = [n for n in z.namelist() if n.endswith('.dex')]
+    z.extractall(tmpdir, dex_files)
+
+  return [os.path.join(tmpdir, f) for f in dex_files]
+
+def SingleMethodCount(dexfile):
+  for line in dexdump.DexDump(dexfile, file_summary=True):
+    m = _METHOD_IDS_SIZE_RE.match(line)
+    if m:
+      return m.group(1)
+  raise Exception('"method_ids_size" not found in dex dump of %s' % dexfile)
+
+def MethodCount(dexfile):
+  tmpdir = tempfile.mkdtemp(suffix='_dex_extract')
+  multidex_file_list = ExtractIfZip(dexfile, tmpdir)
+  try:
+    return sum(int(SingleMethodCount(d)) for d in multidex_file_list)
+  finally:
+    shutil.rmtree(tmpdir)
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      '--apk-name', help='Name of the APK to which the dexfile corresponds.')
+  parser.add_argument('dexfile')
+
+  args = parser.parse_args()
+
+  devil_chromium.Initialize()
+
+  if not args.apk_name:
+    dirname, basename = os.path.split(args.dexfile)
+    while basename:
+      if 'apk' in basename:
+        args.apk_name = basename
+        break
+      dirname, basename = os.path.split(dirname)
+    else:
+      parser.error(
+          'Unable to determine apk name from %s, '
+          'and --apk-name was not provided.' % args.dexfile)
+
+  method_count = MethodCount(args.dexfile)
+  perf_tests_results_helper.PrintPerfResult(
+      '%s_methods' % args.apk_name, 'total', [method_count], 'methods')
+  return 0
+
+if __name__ == '__main__':
+  sys.exit(main())
+
diff --git a/build/android/native_app_dependencies.gypi b/build/android/native_app_dependencies.gypi
new file mode 100644
index 0000000..4651ac3
--- /dev/null
+++ b/build/android/native_app_dependencies.gypi
@@ -0,0 +1,76 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into a target to provide a rule
+# to strip and place dependent shared libraries required by a native binary in a
+# single folder that can later be pushed to the device.
+#
+# NOTE: consider packaging your binary as an apk instead of pushing and
+# running a raw native executable.
+#
+# To use this, create a gyp target with the following form:
+#  {
+#    'target_name': 'target_that_depends_on_my_binary',
+#    'type': 'none',
+#    'dependencies': [
+#      'my_binary',
+#    ],
+#    'variables': {
+#      'native_binary': '<(PRODUCT_DIR)/my_binary',
+#      'output_dir': 'location to place binary and dependent libraries'
+#    },
+#    'includes': [ '../../build/android/native_app_dependencies.gypi' ],
+#  },
+#
+
+{
+  'variables': {
+    'include_main_binary%': 1,
+    'extra_files%': [],
+  },
+  'conditions': [
+      ['android_must_copy_system_libraries == 1', {
+        'dependencies': [
+          '<(DEPTH)/build/android/setup.gyp:copy_system_libraries',
+        ],
+        'variables': {
+          'intermediate_dir': '<(PRODUCT_DIR)/<(_target_name)',
+          'ordered_libraries_file': '<(intermediate_dir)/native_libraries.json',
+        },
+        'actions': [
+          {
+            'variables': {
+              'input_libraries': ['<(native_binary)'],
+            },
+            'includes': ['../../build/android/write_ordered_libraries.gypi'],
+          },
+          {
+            'action_name': 'stripping native libraries',
+            'variables': {
+              'stripped_libraries_dir%': '<(output_dir)',
+              'input_paths': ['<(native_binary)'],
+              'stamp': '<(intermediate_dir)/strip.stamp',
+            },
+            'includes': ['../../build/android/strip_native_libraries.gypi'],
+          },
+        ],
+      }],
+      ['extra_files!=[]', {
+        'copies': [
+          {
+            'destination': '<(output_dir)',
+            'files': [ '<@(extra_files)' ],
+          }
+        ],
+      }],
+      ['include_main_binary==1', {
+        'copies': [
+          {
+            'destination': '<(output_dir)',
+            'files': [ '<(native_binary)' ],
+          }
+        ],
+      }],
+  ],
+}
diff --git a/build/android/ndk.gyp b/build/android/ndk.gyp
new file mode 100644
index 0000000..b491db2
--- /dev/null
+++ b/build/android/ndk.gyp
@@ -0,0 +1,26 @@
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'targets': [
+    {
+      'target_name': 'cpu_features',
+      'type': 'static_library',
+      'direct_dependent_settings': {
+        'include_dirs': [
+          '<(android_ndk_root)/sources/android/cpufeatures',
+        ],
+      },
+      'sources': [
+        '<(android_ndk_root)/sources/android/cpufeatures/cpu-features.c',
+      ],
+      'variables': {
+        'clang_warning_flags': [
+          # cpu-features.c has few unused functions on x86 b/26403333
+          '-Wno-unused-function',
+        ],
+      },
+    },
+  ],
+}
diff --git a/build/android/pack_relocations.gypi b/build/android/pack_relocations.gypi
new file mode 100644
index 0000000..61b4e2c
--- /dev/null
+++ b/build/android/pack_relocations.gypi
@@ -0,0 +1,77 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide a rule that
+# packs relocations in Release builds of native libraries.
+#
+# To use this, create a gyp target with the following form:
+#  {
+#    'actions': [
+#      {
+#        'action_name': 'pack_relocations',
+#        'variables': {
+#          'enable_packing': 'pack relocations if 1, plain file copy if 0',
+#          'exclude_packing_list': 'names of libraries explicitly not packed',
+#          'ordered_libraries_file': 'file generated by write_ordered_libraries',
+#          'input_paths': 'files to be added to the list of inputs',
+#          'stamp': 'file to touch when the action is complete',
+#          'stripped_libraries_dir': 'directory holding stripped libraries',
+#          'packed_libraries_dir': 'directory holding packed libraries',
+#        },
+#        'includes': [ '../../build/android/pack_relocations.gypi' ],
+#      },
+#    ],
+#  },
+#
+
+{
+  'variables': {
+    'input_paths': [],
+  },
+  'inputs': [
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(DEPTH)/build/android/gyp/pack_relocations.py',
+    '<(ordered_libraries_file)',
+    '>@(input_paths)',
+  ],
+  'outputs': [
+    '<(stamp)',
+  ],
+  'conditions': [
+    ['enable_packing == 1', {
+      'message': 'Packing relocations for <(_target_name)',
+      'dependencies': [
+        '<(DEPTH)/third_party/android_platform/relocation_packer.gyp:android_relocation_packer#host',
+      ],
+      'inputs': [
+        '<(PRODUCT_DIR)/android_relocation_packer',
+      ],
+      'action': [
+        'python', '<(DEPTH)/build/android/gyp/pack_relocations.py',
+        '--configuration-name=<(CONFIGURATION_NAME)',
+        '--enable-packing=1',
+        '--exclude-packing-list=<@(exclude_packing_list)',
+        '--android-pack-relocations=<(PRODUCT_DIR)/android_relocation_packer',
+        '--stripped-libraries-dir=<(stripped_libraries_dir)',
+        '--packed-libraries-dir=<(packed_libraries_dir)',
+        '--libraries=@FileArg(<(ordered_libraries_file):libraries)',
+        '--stamp=<(stamp)',
+      ],
+    }, {
+      'message': 'Copying libraries (no relocation packing) for <(_target_name)',
+      'action': [
+        'python', '<(DEPTH)/build/android/gyp/pack_relocations.py',
+        '--configuration-name=<(CONFIGURATION_NAME)',
+        '--enable-packing=0',
+        '--stripped-libraries-dir=<(stripped_libraries_dir)',
+        '--packed-libraries-dir=<(packed_libraries_dir)',
+        '--libraries=@FileArg(<(ordered_libraries_file):libraries)',
+        '--stamp=<(stamp)',
+      ],
+    }],
+    ['android_must_copy_system_libraries == 1', {
+      # Add a fake output to force the build to always re-run this step. This
+      # is required because the real inputs are not known at gyp-time and
+      # changing base.so may not trigger changes to dependent libraries.
+      'outputs': [ '<(stamp).fake' ]
+    }],
+  ],
+}
diff --git a/build/android/package_resources_action.gypi b/build/android/package_resources_action.gypi
new file mode 100644
index 0000000..a83c02d
--- /dev/null
+++ b/build/android/package_resources_action.gypi
@@ -0,0 +1,105 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is a helper to java_apk.gypi. It should be used to create an
+# action that runs ApkBuilder via ANT.
+#
+# Required variables:
+#  apk_name - File name (minus path & extension) of the output apk.
+#  android_manifest_path - Path to AndroidManifest.xml.
+#  app_manifest_version_name - Set the app's 'human readable' version name.
+#  app_manifest_version_code - Set the app's integer version code.
+# Optional variables:
+#  asset_location - The directory where assets are located (if any).
+#  create_density_splits - Whether to create density-based apk splits. Splits
+#    are supported only for minSdkVersion >= 21.
+#  language_splits - List of languages to create apk splits for.
+#  resource_zips - List of paths to resource zip files.
+#  shared_resources - Make a resource package that can be loaded by a different
+#    application at runtime to access the package's resources.
+#  app_as_shared_library - Make a resource package that can be loaded as shared
+#    library.
+#  extensions_to_not_compress - E.g.: 'pak,dat,bin'
+#  extra_inputs - List of extra action inputs.
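+#
+# A hypothetical minimal use (java_apk.gypi normally supplies these values;
+# note that intermediate_dir and language_splits must also be defined):
+#  {
+#    'variables': {
+#      'apk_name': 'MyApp',
+#      'android_manifest_path': 'java/AndroidManifest.xml',
+#      'app_manifest_version_name': '1.0',
+#      'app_manifest_version_code': '1',
+#      'intermediate_dir': '<(PRODUCT_DIR)/myapp',
+#      'language_splits': [],
+#    },
+#    'includes': [ '../../build/android/package_resources_action.gypi' ],
+#  },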
+{
+  'variables': {
+    'asset_location%': '',
+    'create_density_splits%': 0,
+    'resource_zips%': [],
+    'shared_resources%': 0,
+    'app_as_shared_library%': 0,
+    'extensions_to_not_compress%': '',
+    'extra_inputs%': [],
+    'resource_packaged_apk_name': '<(apk_name)-resources.ap_',
+    'resource_packaged_apk_path': '<(intermediate_dir)/<(resource_packaged_apk_name)',
+  },
+  'action_name': 'package_resources_<(apk_name)',
+  'message': 'packaging resources for <(apk_name)',
+  'inputs': [
+    # TODO: This isn't always rerun correctly, http://crbug.com/351928
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(DEPTH)/build/android/gyp/package_resources.py',
+    '<(android_manifest_path)',
+    '<@(extra_inputs)',
+  ],
+  'outputs': [
+    '<(resource_packaged_apk_path)',
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/package_resources.py',
+    '--android-sdk-jar', '<(android_sdk_jar)',
+    '--aapt-path', '<(android_aapt_path)',
+    '--configuration-name', '<(CONFIGURATION_NAME)',
+    '--android-manifest', '<(android_manifest_path)',
+    '--version-code', '<(app_manifest_version_code)',
+    '--version-name', '<(app_manifest_version_name)',
+    '--no-compress', '<(extensions_to_not_compress)',
+    '--apk-path', '<(resource_packaged_apk_path)',
+  ],
+  'conditions': [
+    ['shared_resources == 1', {
+      'action': [
+        '--shared-resources',
+      ],
+    }],
+    ['app_as_shared_library == 1', {
+      'action': [
+        '--app-as-shared-lib',
+      ],
+    }],
+    ['asset_location != ""', {
+      'action': [
+        '--asset-dir', '<(asset_location)',
+      ],
+    }],
+    ['create_density_splits == 1', {
+      'action': [
+        '--create-density-splits',
+      ],
+      'outputs': [
+        '<(resource_packaged_apk_path)_hdpi',
+        '<(resource_packaged_apk_path)_xhdpi',
+        '<(resource_packaged_apk_path)_xxhdpi',
+        '<(resource_packaged_apk_path)_xxxhdpi',
+        '<(resource_packaged_apk_path)_tvdpi',
+      ],
+    }],
+    ['language_splits != []', {
+      'action': [
+        '--language-splits=<(language_splits)',
+      ],
+      'outputs': [
+        "<!@(python <(DEPTH)/build/apply_locales.py '<(resource_packaged_apk_path)_ZZLOCALE' <(language_splits))",
+      ],
+    }],
+    ['resource_zips != []', {
+      'action': [
+        '--resource-zips', '>(resource_zips)',
+      ],
+      'inputs': [
+        '>@(resource_zips)',
+      ],
+    }],
+  ],
+}
diff --git a/build/android/play_services/LICENSE.sha1 b/build/android/play_services/LICENSE.sha1
new file mode 100644
index 0000000..8e606a7
--- /dev/null
+++ b/build/android/play_services/LICENSE.sha1
@@ -0,0 +1 @@
+11cc73d4b7fa82560fbf5bbc1095dbac30308e7c
\ No newline at end of file
diff --git a/build/android/play_services/__init__.py b/build/android/play_services/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/build/android/play_services/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/play_services/config.json b/build/android/play_services/config.json
new file mode 100644
index 0000000..fb38ef9
--- /dev/null
+++ b/build/android/play_services/config.json
@@ -0,0 +1,4 @@
+{
+  "version_number": 8487000,
+  "version_xml_path": "res/values/version.xml"
+}
diff --git a/build/android/play_services/google_play_services_library.zip.sha1 b/build/android/play_services/google_play_services_library.zip.sha1
new file mode 100644
index 0000000..fbd34e4
--- /dev/null
+++ b/build/android/play_services/google_play_services_library.zip.sha1
@@ -0,0 +1 @@
+1db2c536157710a4fe7edb59454e0b8f8b7e51bd
\ No newline at end of file
diff --git a/build/android/play_services/preprocess.py b/build/android/play_services/preprocess.py
new file mode 100755
index 0000000..99c000e
--- /dev/null
+++ b/build/android/play_services/preprocess.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+'''Prepares the Google Play services split client libraries before usage by
+Chrome's build system.
+
+We need to preprocess Google Play services before using it in Chrome
+builds for 2 main reasons:
+
+- Getting rid of unused resources: unsupported languages, unused
+drawables, etc.
+
+- Merging the different jars so that they can be proguarded more
+easily. This is necessary since debug and test apks get very close
+to the dex limit.
+
+The script is supposed to be used with the maven repository that can be
+obtained by downloading the "extra-google-m2repository" from the Android SDK
+Manager. It also supports importing from already extracted AAR files using the
+--is-extracted-repo flag. The expected directory structure in that case would
+look like:
+
+    REPOSITORY_DIR
+    +-- CLIENT_1
+    |   +-- <content of the first AAR file>
+    +-- CLIENT_2
+    +-- etc.
+
+The output is a directory with the following structure:
+
+    OUT_DIR
+    +-- google-play-services.jar
+    +-- res
+    |   +-- CLIENT_1
+    |   |   +-- color
+    |   |   +-- values
+    |   |   +-- etc.
+    |   +-- CLIENT_2
+    |       +-- ...
+    +-- stub
+        +-- res/[.git-keep-directory]
+        +-- src/android/UnusedStub.java
+
+Requires the `jar` utility in the path.
+
+'''
+
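+# Example invocation (a sketch; the flags are defined in main() below):
+#   play_services/preprocess.py -r <path to the m2 repository> \
+#       -o out/play_services -c build/android/play_services/config.json
+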
+import argparse
+import glob
+import itertools
+import os
+import shutil
+import stat
+import sys
+import tempfile
+import zipfile
+
+from datetime import datetime
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
+import devil_chromium
+from devil.utils import cmd_helper
+from play_services import utils
+from pylib.utils import argparse_utils
+
+
+M2_PKG_PATH = os.path.join('com', 'google', 'android', 'gms')
+
+
+def main():
+  parser = argparse.ArgumentParser(description=(
+      "Prepares the Google Play services split client libraries before usage "
+      "by Chrome's build system. See the script's documentation for more a "
+      "detailed help."))
+  argparse_utils.CustomHelpAction.EnableFor(parser)
+  required_args = parser.add_argument_group('required named arguments')
+  required_args.add_argument('-r',
+                             '--repository',
+                             help=('the Google Play services repository '
+                                   'location'),
+                             required=True,
+                             metavar='FILE')
+  required_args.add_argument('-o',
+                             '--out-dir',
+                             help='the output directory',
+                             required=True,
+                             metavar='FILE')
+  required_args.add_argument('-c',
+                             '--config-file',
+                             help='the config file path',
+                             required=True,
+                             metavar='FILE')
+  parser.add_argument('-x',
+                      '--is-extracted-repo',
+                      action='store_true',
+                      help='the provided repository is not made of AAR files')
+  parser.add_argument('--config-help',
+                      action='custom_help',
+                      custom_help_text=utils.ConfigParser.__doc__,
+                      help='show the configuration file format help')
+
+  args = parser.parse_args()
+
+  devil_chromium.Initialize()
+
+  return ProcessGooglePlayServices(args.repository,
+                                   args.out_dir,
+                                   args.config_file,
+                                   args.is_extracted_repo)
+
+
+def ProcessGooglePlayServices(repo, out_dir, config_path, is_extracted_repo):
+  config = utils.ConfigParser(config_path)
+
+  tmp_root = tempfile.mkdtemp()
+  try:
+    tmp_paths = _SetupTempDir(tmp_root)
+
+    if is_extracted_repo:
+      _ImportFromExtractedRepo(config, tmp_paths, repo)
+    else:
+      _ImportFromAars(config, tmp_paths, repo)
+
+    _GenerateCombinedJar(tmp_paths)
+    _ProcessResources(config, tmp_paths, repo)
+    _BuildOutput(config, tmp_paths, out_dir)
+  finally:
+    shutil.rmtree(tmp_root)
+
+  return 0
+
+
+def _SetupTempDir(tmp_root):
+  tmp_paths = {
+      'root': tmp_root,
+      'imported_clients': os.path.join(tmp_root, 'imported_clients'),
+      'extracted_jars': os.path.join(tmp_root, 'jar'),
+      'combined_jar': os.path.join(tmp_root, 'google-play-services.jar'),
+  }
+  os.mkdir(tmp_paths['imported_clients'])
+  os.mkdir(tmp_paths['extracted_jars'])
+
+  return tmp_paths
+
+
+def _SetupOutputDir(out_dir):
+  out_paths = {
+      'root': out_dir,
+      'res': os.path.join(out_dir, 'res'),
+      'jar': os.path.join(out_dir, 'google-play-services.jar'),
+      'stub': os.path.join(out_dir, 'stub'),
+  }
+
+  shutil.rmtree(out_paths['jar'], ignore_errors=True)
+  shutil.rmtree(out_paths['res'], ignore_errors=True)
+  shutil.rmtree(out_paths['stub'], ignore_errors=True)
+
+  return out_paths
+
+
+def _MakeWritable(dir_path):
+  for root, dirs, files in os.walk(dir_path):
+    for path in itertools.chain(dirs, files):
+      st = os.stat(os.path.join(root, path))
+      os.chmod(os.path.join(root, path), st.st_mode | stat.S_IWUSR)
+
+
+def _ImportFromAars(config, tmp_paths, repo):
+  for client in config.clients:
+    aar_name = '%s-%s.aar' % (client, config.sdk_version)
+    aar_path = os.path.join(repo, M2_PKG_PATH, client,
+                            config.sdk_version, aar_name)
+    aar_out_path = os.path.join(tmp_paths['imported_clients'], client)
+    _ExtractAll(aar_path, aar_out_path)
+
+    client_jar_path = os.path.join(aar_out_path, 'classes.jar')
+    _ExtractAll(client_jar_path, tmp_paths['extracted_jars'])
+
+
+def _ImportFromExtractedRepo(config, tmp_paths, repo):
+  # Import the clients
+  try:
+    for client in config.clients:
+      client_out_dir = os.path.join(tmp_paths['imported_clients'], client)
+      shutil.copytree(os.path.join(repo, client), client_out_dir)
+
+      client_jar_path = os.path.join(client_out_dir, 'classes.jar')
+      _ExtractAll(client_jar_path, tmp_paths['extracted_jars'])
+  finally:
+    _MakeWritable(tmp_paths['imported_clients'])
+
+
+def _GenerateCombinedJar(tmp_paths):
+  out_file_name = tmp_paths['combined_jar']
+  working_dir = tmp_paths['extracted_jars']
+  cmd_helper.Call(['jar', '-cf', out_file_name, '-C', working_dir, '.'])
+
+
+def _ProcessResources(config, tmp_paths, repo):
+  LOCALIZED_VALUES_BASE_NAME = 'values-'
+  locale_whitelist = set(config.locale_whitelist)
+
+  glob_pattern = os.path.join(tmp_paths['imported_clients'], '*', 'res', '*')
+  for res_dir in glob.glob(glob_pattern):
+    dir_name = os.path.basename(res_dir)
+
+    if dir_name.startswith('drawable'):
+      shutil.rmtree(res_dir)
+      continue
+
+    if dir_name.startswith(LOCALIZED_VALUES_BASE_NAME):
+      dir_locale = dir_name[len(LOCALIZED_VALUES_BASE_NAME):]
+      if dir_locale not in locale_whitelist:
+        shutil.rmtree(res_dir)
+
+  # Reimport files from the whitelist.
+  for res_path in config.resource_whitelist:
+    for whitelisted_file in glob.glob(os.path.join(repo, res_path)):
+      resolved_file = os.path.relpath(whitelisted_file, repo)
+      rebased_res = os.path.join(tmp_paths['imported_clients'], resolved_file)
+
+      if not os.path.exists(os.path.dirname(rebased_res)):
+        os.makedirs(os.path.dirname(rebased_res))
+
+      shutil.copy(os.path.join(repo, whitelisted_file), rebased_res)
+
+
+def _BuildOutput(config, tmp_paths, out_dir):
+  generation_date = datetime.utcnow()
+  version_xml_path = os.path.join(tmp_paths['imported_clients'],
+                                  config.version_xml_path)
+  play_services_full_version = utils.GetVersionNumberFromLibraryResources(
+      version_xml_path)
+
+  out_paths = _SetupOutputDir(out_dir)
+
+  # Copy the resources to the output dir
+  for client in config.clients:
+    res_in_tmp_dir = os.path.join(tmp_paths['imported_clients'], client, 'res')
+    if os.path.isdir(res_in_tmp_dir) and os.listdir(res_in_tmp_dir):
+      res_in_final_dir = os.path.join(out_paths['res'], client)
+      shutil.copytree(res_in_tmp_dir, res_in_final_dir)
+
+  # Copy the jar
+  shutil.copyfile(tmp_paths['combined_jar'], out_paths['jar'])
+
+  # Write the java dummy stub. Needed for gyp to create the resource jar
+  stub_location = os.path.join(out_paths['stub'], 'src', 'android')
+  os.makedirs(stub_location)
+  with open(os.path.join(stub_location, 'UnusedStub.java'), 'w') as stub:
+    stub.write('package android;'
+               'public final class UnusedStub {'
+               '    private UnusedStub() {}'
+               '}')
+
+  # Create the main res directory. It is needed by gyp
+  stub_res_location = os.path.join(out_paths['stub'], 'res')
+  os.makedirs(stub_res_location)
+  with open(os.path.join(stub_res_location, '.res-stamp'), 'w') as stamp:
+    content_str = 'google_play_services_version: %s\nutc_date: %s\n'
+    stamp.write(content_str % (play_services_full_version, generation_date))
+
+  config.UpdateVersionNumber(play_services_full_version)
+
+
+def _ExtractAll(zip_path, out_path):
+  with zipfile.ZipFile(zip_path, 'r') as zip_file:
+    zip_file.extractall(out_path)
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/play_services/update.py b/build/android/play_services/update.py
new file mode 100755
index 0000000..8a70325
--- /dev/null
+++ b/build/android/play_services/update.py
@@ -0,0 +1,515 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+'''
+Script to help uploading and downloading the Google Play services library to
+and from a Google Cloud storage.
+'''
+
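+# Typical invocations (a sketch; flags are defined on the subparsers below):
+#   build/android/play_services/update.py download
+#   build/android/play_services/update.py sdk
+#   build/android/play_services/update.py upload --skip-git
+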
+import argparse
+import logging
+import os
+import re
+import shutil
+import sys
+import tempfile
+import zipfile
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
+import devil_chromium
+from devil.utils import cmd_helper
+from play_services import utils
+from pylib import constants
+from pylib.constants import host_paths
+from pylib.utils import logging_utils
+
+sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'build'))
+import find_depot_tools  # pylint: disable=import-error,unused-import
+import breakpad
+import download_from_google_storage
+import upload_to_google_storage
+
+
+# Directory where the SHA1 files for the zip and the license are stored.
+# It should be managed by git to provide information about new versions.
+SHA1_DIRECTORY = os.path.join(host_paths.DIR_SOURCE_ROOT, 'build', 'android',
+                              'play_services')
+
+# Default bucket used for storing the files.
+GMS_CLOUD_STORAGE = 'chromium-android-tools/play-services'
+
+# Path to the default configuration file. It exposes the currently installed
+# version of the library in a human readable way.
+CONFIG_DEFAULT_PATH = os.path.join(host_paths.DIR_SOURCE_ROOT, 'build',
+                                   'android', 'play_services', 'config.json')
+
+LICENSE_FILE_NAME = 'LICENSE'
+ZIP_FILE_NAME = 'google_play_services_library.zip'
+GMS_PACKAGE_ID = 'extra-google-google_play_services'  # used by sdk manager
+
+LICENSE_PATTERN = re.compile(r'^Pkg\.License=(?P<text>.*)$', re.MULTILINE)
+
+
+def main(raw_args):
+  parser = argparse.ArgumentParser(
+      description=__doc__ + 'Please see the subcommand help for more details.',
+      formatter_class=utils.DefaultsRawHelpFormatter)
+  subparsers = parser.add_subparsers(title='commands')
+
+  # Download arguments
+  parser_download = subparsers.add_parser(
+      'download',
+      help='download the library from the cloud storage',
+      description=Download.__doc__,
+      formatter_class=utils.DefaultsRawHelpFormatter)
+  parser_download.set_defaults(func=Download)
+  AddBasicArguments(parser_download)
+  AddBucketArguments(parser_download)
+
+  # SDK Update arguments
+  parser_sdk = subparsers.add_parser(
+      'sdk',
+      help='get the latest Google Play services SDK using Android SDK Manager',
+      description=UpdateSdk.__doc__,
+      formatter_class=utils.DefaultsRawHelpFormatter)
+  parser_sdk.set_defaults(func=UpdateSdk)
+  AddBasicArguments(parser_sdk)
+
+  # Upload arguments
+  parser_upload = subparsers.add_parser(
+      'upload',
+      help='upload the library to the cloud storage',
+      description=Upload.__doc__,
+      formatter_class=utils.DefaultsRawHelpFormatter)
+
+  parser_upload.add_argument('--skip-git',
+                             action='store_true',
+                             help="don't commit the changes at the end")
+  parser_upload.set_defaults(func=Upload)
+  AddBasicArguments(parser_upload)
+  AddBucketArguments(parser_upload)
+
+  args = parser.parse_args(raw_args)
+  if args.verbose:
+    logging.basicConfig(level=logging.DEBUG)
+  logging_utils.ColorStreamHandler.MakeDefault(not _IsBotEnvironment())
+  devil_chromium.Initialize()
+  return args.func(args)
+
+
+def AddBasicArguments(parser):
+  '''
+  Defines the common arguments on the subparsers rather than on the main
+  parser. This allows putting arguments after the command:
+  `foo.py upload --debug --force` instead of `foo.py --debug upload --force`.
+  '''
+
+  parser.add_argument('--sdk-root',
+                      help='base path to the Android SDK tools root',
+                      default=constants.ANDROID_SDK_ROOT)
+
+  parser.add_argument('-v', '--verbose',
+                      action='store_true',
+                      help='print debug information')
+
+
+def AddBucketArguments(parser):
+  parser.add_argument('--bucket',
+                      help='name of the bucket where the files are stored',
+                      default=GMS_CLOUD_STORAGE)
+
+  parser.add_argument('--config',
+                      help='JSON Configuration file',
+                      default=CONFIG_DEFAULT_PATH)
+
+  parser.add_argument('--dry-run',
+                      action='store_true',
+                      help=('run the script in dry run mode. Files will be '
+                            'copied to a local directory instead of the '
+                            'cloud storage. The bucket name will be used as '
+                            'the path to that directory, relative to the '
+                            'repository root.'))
+
+  parser.add_argument('-f', '--force',
+                      action='store_true',
+                      help='run even if the library is already up to date')
+
+
+def Download(args):
+  '''
+  Downloads the Google Play services library from a Google Cloud Storage bucket
+  and installs it to
+  //third_party/android_tools/sdk/extras/google/google_play_services.
+
+  A license check will be made, and the user might have to accept the license
+  if that has not been done before.
+  '''
+
+  if not os.path.isdir(args.sdk_root):
+    logging.debug('Did not find the Android SDK root directory at "%s".',
+                  args.sdk_root)
+    if not args.force:
+      logging.info('Skipping, not on an android checkout.')
+      return 0
+
+  config = utils.ConfigParser(args.config)
+  paths = PlayServicesPaths(args.sdk_root, config.version_xml_path)
+
+  if os.path.isdir(paths.package) and not os.access(paths.package, os.W_OK):
+    logging.error('Failed updating the Google Play Services library. '
+                  'The location is not writable. Please remove the '
+                  'directory (%s) and try again.', paths.package)
+    return -2
+
+  new_lib_zip_sha1 = os.path.join(SHA1_DIRECTORY, ZIP_FILE_NAME + '.sha1')
+
+  logging.debug('Comparing zip hashes: %s and %s', new_lib_zip_sha1,
+                paths.lib_zip_sha1)
+  if utils.FileEquals(new_lib_zip_sha1, paths.lib_zip_sha1) and not args.force:
+    logging.info('Skipping, the Google Play services library is up to date.')
+    return 0
+
+  bucket_path = _VerifyBucketPathFormat(args.bucket,
+                                        config.version_number,
+                                        args.dry_run)
+
+  tmp_root = tempfile.mkdtemp()
+  try:
+    # setup the destination directory
+    if not os.path.isdir(paths.package):
+      os.makedirs(paths.package)
+
+    # download license file from bucket/{version_number}/license.sha1
+    new_license = os.path.join(tmp_root, LICENSE_FILE_NAME)
+
+    license_sha1 = os.path.join(SHA1_DIRECTORY, LICENSE_FILE_NAME + '.sha1')
+    _DownloadFromBucket(bucket_path, license_sha1, new_license,
+                        args.verbose, args.dry_run)
+
+    if (not _IsBotEnvironment() and
+        not _CheckLicenseAgreement(new_license, paths.license,
+                                   config.version_number)):
+      logging.warning('Your version of the Google Play services library is '
+                      'not up to date. You might run into issues building '
+                      'or running the app. Please run `%s download` to '
+                      'retry downloading it.', __file__)
+      return 0
+
+    new_lib_zip = os.path.join(tmp_root, ZIP_FILE_NAME)
+    _DownloadFromBucket(bucket_path, new_lib_zip_sha1, new_lib_zip,
+                        args.verbose, args.dry_run)
+
+    try:
+      # We remove the current version of the Google Play services SDK.
+      if os.path.exists(paths.package):
+        shutil.rmtree(paths.package)
+      os.makedirs(paths.package)
+
+      logging.debug('Extracting the library to %s', paths.lib)
+      with zipfile.ZipFile(new_lib_zip, "r") as new_lib_zip_file:
+        new_lib_zip_file.extractall(paths.lib)
+
+      logging.debug('Copying %s to %s', new_license, paths.license)
+      shutil.copy(new_license, paths.license)
+
+      logging.debug('Copying %s to %s', new_lib_zip_sha1, paths.lib_zip_sha1)
+      shutil.copy(new_lib_zip_sha1, paths.lib_zip_sha1)
+
+      logging.info('Update complete.')
+
+    except Exception as e:  # pylint: disable=broad-except
+      logging.error('Failed updating the Google Play Services library. '
+                    'An error occurred while installing the new version in '
+                    'the SDK directory: %s ', e)
+      return -3
+  finally:
+    shutil.rmtree(tmp_root)
+
+  return 0
+
+
+def UpdateSdk(args):
+  '''
+  Uses the Android SDK Manager to download the latest Google Play services SDK
+  locally. Its usual installation path is
+  //third_party/android_tools/sdk/extras/google/google_play_services
+  '''
+
+  # This function should not run on bots and could fail for many user- and
+  # setup-related reasons. Also, exceptions here are not caught, so we
+  # disable breakpad to avoid spamming the logs.
+  breakpad.IS_ENABLED = False
+
+  sdk_manager = os.path.join(args.sdk_root, 'tools', 'android')
+  cmd = [sdk_manager, 'update', 'sdk', '--no-ui', '--filter', GMS_PACKAGE_ID]
+  cmd_helper.Call(cmd)
+  # If no update is needed, the command still returns successfully, so we
+  # just do nothing.
+
+  return 0
+
+
+def Upload(args):
+  '''
+  Uploads the library from the local Google Play services SDK to a Google Cloud
+  storage bucket.
+
+  By default, a local commit will be made at the end of the operation.
+  '''
+
+  # This function should not run on bots and could fail for many user- and
+  # setup-related reasons. Also, exceptions here are not caught, so we
+  # disable breakpad to avoid spamming the logs.
+  breakpad.IS_ENABLED = False
+
+  config = utils.ConfigParser(args.config)
+  paths = PlayServicesPaths(args.sdk_root, config.version_xml_path)
+
+  if not args.skip_git and utils.IsRepoDirty(host_paths.DIR_SOURCE_ROOT):
+    logging.error('The repo is dirty. Please commit or stash your changes.')
+    return -1
+
+  new_version_number = utils.GetVersionNumberFromLibraryResources(
+      paths.version_xml)
+  logging.debug('comparing versions: new=%d, old=%s',
+                new_version_number, config.version_number)
+  if new_version_number <= config.version_number and not args.force:
+    logging.info('The checked in version of the library is already the latest '
+                 'one. No update is needed. Please rerun with --force to skip '
+                 'this check.')
+    return 0
+
+  tmp_root = tempfile.mkdtemp()
+  try:
+    new_lib_zip = os.path.join(tmp_root, ZIP_FILE_NAME)
+    new_license = os.path.join(tmp_root, LICENSE_FILE_NAME)
+
+    # shutil.make_archive appends '.zip' itself, so strip it from the name.
+    shutil.make_archive(new_lib_zip[:-4], 'zip', paths.lib)
+    _ExtractLicenseFile(new_license, paths.source_prop)
+
+    bucket_path = _VerifyBucketPathFormat(args.bucket, new_version_number,
+                                          args.dry_run)
+    files_to_upload = [new_lib_zip, new_license]
+    logging.debug('Uploading %s to %s', files_to_upload, bucket_path)
+    _UploadToBucket(bucket_path, files_to_upload, args.dry_run)
+
+    new_lib_zip_sha1 = os.path.join(SHA1_DIRECTORY,
+                                    ZIP_FILE_NAME + '.sha1')
+    new_license_sha1 = os.path.join(SHA1_DIRECTORY,
+                                    LICENSE_FILE_NAME + '.sha1')
+    shutil.copy(new_lib_zip + '.sha1', new_lib_zip_sha1)
+    shutil.copy(new_license + '.sha1', new_license_sha1)
+  finally:
+    shutil.rmtree(tmp_root)
+
+  config.UpdateVersionNumber(new_version_number)
+
+  if not args.skip_git:
+    commit_message = ('Update the Google Play services dependency to %s\n'
+                      '\n') % new_version_number
+    utils.MakeLocalCommit(host_paths.DIR_SOURCE_ROOT,
+                          [new_lib_zip_sha1, new_license_sha1, config.path],
+                          commit_message)
+
+  return 0
+
+
+def _DownloadFromBucket(bucket_path, sha1_file, destination, verbose,
+                        is_dry_run):
+  '''Downloads the file designated by the provided sha1 from a cloud bucket.'''
+
+  download_from_google_storage.download_from_google_storage(
+      input_filename=sha1_file,
+      base_url=bucket_path,
+      gsutil=_InitGsutil(is_dry_run),
+      num_threads=1,
+      directory=None,
+      recursive=False,
+      force=False,
+      output=destination,
+      ignore_errors=False,
+      sha1_file=sha1_file,
+      verbose=verbose,
+      auto_platform=True,
+      extract=False)
+
+
+def _UploadToBucket(bucket_path, files_to_upload, is_dry_run):
+  '''Uploads the files designated by the provided paths to a cloud bucket.'''
+
+  upload_to_google_storage.upload_to_google_storage(
+      input_filenames=files_to_upload,
+      base_url=bucket_path,
+      gsutil=_InitGsutil(is_dry_run),
+      force=False,
+      use_md5=False,
+      num_threads=1,
+      skip_hashing=False,
+      gzip=None)
+
+
+def _InitGsutil(is_dry_run):
+  '''Initializes Gsutil as the regular or dummy version for dry runs.'''
+
+  if is_dry_run:
+    return DummyGsutil()
+  else:
+    return download_from_google_storage.Gsutil(
+        download_from_google_storage.GSUTIL_DEFAULT_PATH)
+
+
+def _ExtractLicenseFile(license_path, prop_file_path):
+  with open(prop_file_path, 'r') as prop_file:
+    prop_file_content = prop_file.read()
+
+  match = LICENSE_PATTERN.search(prop_file_content)
+  if not match:
+    raise AttributeError('The license was not found in ' +
+                         os.path.abspath(prop_file_path))
+
+  with open(license_path, 'w') as license_file:
+    license_file.write(match.group('text'))
+
+
+def _CheckLicenseAgreement(expected_license_path, actual_license_path,
+                           version_number):
+  '''
+  Checks that the new license is the one already accepted by the user. If it
+  isn't, it prompts the user to accept it. Returns whether the expected license
+  has been accepted.
+  '''
+
+  if utils.FileEquals(expected_license_path, actual_license_path):
+    return True
+
+  with open(expected_license_path) as license_file:
+    # Uses plain print rather than logging to make sure this is not formatted
+    # by the logger.
+    print ('Updating the Google Play services SDK to '
+           'version %d.' % version_number)
+
+    # The output is buffered when running as part of gclient hooks. We split
+    # the text here and flush it explicitly to avoid having part of it
+    # dropped.
+    # Note: text contains *escaped* new lines, so we split by '\\n', not '\n'.
+    for license_part in license_file.read().split('\\n'):
+      print license_part
+      sys.stdout.flush()
+
+  # Need to put the prompt on a separate line, otherwise the gclient hook
+  # buffer only prints it after we receive an input.
+  print 'Do you accept the license? [y/n]: '
+  sys.stdout.flush()
+  return raw_input('> ') in ('Y', 'y')
+
+
+def _IsBotEnvironment():
+  return bool(os.environ.get('CHROME_HEADLESS'))
+
+
+def _VerifyBucketPathFormat(bucket_name, version_number, is_dry_run):
+  '''
+  Formats and checks the download/upload path depending on whether we are
+  running in dry run mode or not. Returns a supposedly safe path to use with
+  Gsutil.
+  '''
+
+  if is_dry_run:
+    bucket_path = os.path.abspath(os.path.join(bucket_name,
+                                               str(version_number)))
+    if not os.path.isdir(bucket_path):
+      os.makedirs(bucket_path)
+  else:
+    if bucket_name.startswith('gs://'):
+      # We enforce the syntax without gs:// for consistency with the standalone
+      # download/upload scripts and to make dry run transition easier.
+      raise AttributeError('Please provide the bucket name without the gs:// '
+                           'prefix (e.g. %s)' % GMS_CLOUD_STORAGE)
+    bucket_path = 'gs://%s/%d' % (bucket_name, version_number)
+
+  return bucket_path
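+
+# Illustration of the two modes above (values assumed, not from a real run):
+#   _VerifyBucketPathFormat('local/dir', 811500, is_dry_run=True)
+#     -> absolute path to 'local/dir/811500', created if missing.
+#   _VerifyBucketPathFormat('my-bucket/play-services', 811500, is_dry_run=False)
+#     -> 'gs://my-bucket/play-services/811500'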
+
+
+class PlayServicesPaths(object):
+  '''
+  Describes the different paths to be used in the update process.
+
+         Filesystem hierarchy                        | Exposed property / notes
+  ---------------------------------------------------|-------------------------
+  [sdk_root]                                         | sdk_root / (1)
+   +- extras                                         |
+      +- google                                      |
+         +- google_play_services                     | package / (2)
+            +- source.properties                     | source_prop / (3)
+            +- LICENSE                               | license / (4)
+            +- google_play_services_library.zip.sha1 | lib_zip_sha1 / (5)
+            +- libproject                            |
+               +- google-play-services_lib           | lib / (6)
+                  +- res                             |
+                     +- values                       |
+                        +- version.xml               | version_xml (7)
+
+  Notes:
+
+   1. sdk_root: Path provided as a parameter to the script (--sdk_root)
+   2. package: This directory contains the Google Play services SDK itself.
+      When downloaded via the Android SDK manager, it will contain
+      documentation, samples and other files in addition to the library. When
+      the update script downloads the library from our cloud storage, it is
+      cleared.
+   3. source_prop: File created by the Android SDK manager that contains
+      the package information, such as the version info and the license.
+   4. license: File created by the update script. Contains the license accepted
+      by the user.
+   5. lib_zip_sha1: sha1 of the library zip that has been installed by the
+      update script. It is compared with the one required by the config file to
+      check if an update is necessary.
+   6. lib: Contains the library itself: jar and resources. This is what is
+      downloaded from the cloud storage.
+   7. version_xml: File that contains the exact Google Play services library
+      version, the one that we track. That version looks like 811500 and is
+      used in the code and the on-device APK, as opposed to the SDK package
+      version, which looks like 27.0.0 and is used only by the Android SDK
+      manager.
+
+  '''
+
+  def __init__(self, sdk_root, version_xml_path):
+    relative_package = os.path.join('extras', 'google', 'google_play_services')
+    relative_lib = os.path.join(relative_package, 'libproject',
+                                'google-play-services_lib')
+    self.sdk_root = sdk_root
+
+    self.package = os.path.join(sdk_root, relative_package)
+    self.lib_zip_sha1 = os.path.join(self.package, ZIP_FILE_NAME + '.sha1')
+    self.license = os.path.join(self.package, LICENSE_FILE_NAME)
+    self.source_prop = os.path.join(self.package, 'source.properties')
+
+    self.lib = os.path.join(sdk_root, relative_lib)
+    self.version_xml = os.path.join(self.lib, version_xml_path)
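+
+    # Illustration (hypothetical sdk_root, not a real path): with
+    # sdk_root='/work/sdk' and version_xml_path='res/values/version.xml',
+    # self.version_xml resolves to
+    # '/work/sdk/extras/google/google_play_services/libproject/'
+    # 'google-play-services_lib/res/values/version.xml'.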
+
+
+class DummyGsutil(download_from_google_storage.Gsutil):
+  '''
+  Class that replaces Gsutil to use a local directory instead of an online
+  bucket. It relies on the fact that Gsutil commands are very similar to shell
+  ones, so for the ones used here (ls, cp), it works to just use them with a
+  local directory.
+  '''
+
+  def __init__(self):
+    super(DummyGsutil, self).__init__(
+        download_from_google_storage.GSUTIL_DEFAULT_PATH)
+
+  def call(self, *args):
+    logging.debug('Calling command "%s"', str(args))
+    return cmd_helper.GetCmdStatusOutputAndError(args)
+
+  def check_call(self, *args):
+    logging.debug('Calling command "%s"', str(args))
+    return cmd_helper.GetCmdStatusOutputAndError(args)
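+
+  # Sketch of the substitution (paths assumed for illustration): a call such
+  # as check_call('cp', '/tmp/lib.zip', '/tmp/bucket/811500/some_sha1') simply
+  # runs the same arguments as a plain shell command, which is why a local
+  # directory can stand in for the gs:// bucket during dry runs.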
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/build/android/play_services/update_test.py b/build/android/play_services/update_test.py
new file mode 100755
index 0000000..fd68154
--- /dev/null
+++ b/build/android/play_services/update_test.py
@@ -0,0 +1,416 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+'''Unittests for update.py.
+
+They set up a temporary directory that is used to mock a bucket, the directory
+containing the configuration files and the android sdk directory.
+
+Tests run the script with various inputs and check the status of the filesystem.
+'''
+
+import shutil
+import tempfile
+import unittest
+import os
+import sys
+import zipfile
+import contextlib
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
+from play_services import update
+
+
+class TestFunctions(unittest.TestCase):
+  DEFAULT_CONFIG_VERSION = 42
+  DEFAULT_LICENSE = 'Default License'
+  DEFAULT_ZIP_SHA1 = 'zip0and0filling0to0forty0chars0000000000'
+
+  def __init__(self, *args, **kwargs):
+    super(TestFunctions, self).__init__(*args, **kwargs)
+    self.paths = None  # Initialized in SetUpWorkdir
+    self.workdir = None  # Initialized in setUp
+
+  #override
+  def setUp(self):
+    self.workdir = tempfile.mkdtemp()
+
+  #override
+  def tearDown(self):
+    shutil.rmtree(self.workdir)
+    self.workdir = None
+
+  def testUpload(self):
+    version = 1337
+    self.SetUpWorkdir(
+        xml_version=version,
+        gms_lib=True,
+        source_prop=True)
+
+    status = update.main([
+        'upload',
+        '--dry-run',
+        '--skip-git',
+        '--bucket', self.paths.bucket,
+        '--config', self.paths.config_file,
+        '--sdk-root', self.paths.sdk_root
+    ])
+    self.assertEqual(status, 0, 'the command should have succeeded.')
+
+    # bucket should contain license, name = license.sha1
+    self.assertTrue(os.path.isfile(self.paths.config_license_sha1))
+    license_sha1 = _GetFileContent(self.paths.config_license_sha1)
+    bucket_license = os.path.join(self.paths.bucket, str(version),
+                                  license_sha1)
+    self.assertTrue(os.path.isfile(bucket_license))
+    self.assertEqual(_GetFileContent(bucket_license), self.DEFAULT_LICENSE)
+
+    # bucket should contain zip, name = zip.sha1
+    self.assertTrue(os.path.isfile(self.paths.config_zip_sha1))
+    bucket_zip = os.path.join(self.paths.bucket, str(version),
+                              _GetFileContent(self.paths.config_zip_sha1))
+    self.assertTrue(os.path.isfile(bucket_zip))
+
+    # Unzip; the archive should contain the expected files.
+    with zipfile.ZipFile(bucket_zip, "r") as bucket_zip_file:
+      self.assertEqual(bucket_zip_file.namelist(),
+                       ['dummy_file', 'res/values/version.xml'])
+
+  def testUploadAlreadyLatestVersion(self):
+    self.SetUpWorkdir(
+        xml_version=self.DEFAULT_CONFIG_VERSION,
+        gms_lib=True,
+        source_prop=True)
+
+    status = update.main([
+        'upload',
+        '--dry-run',
+        '--skip-git',
+        '--bucket', self.paths.bucket,
+        '--config', self.paths.config_file,
+        '--sdk-root', self.paths.sdk_root,
+    ])
+    self.assertEqual(status, 0, 'the command should have succeeded.')
+
+    # bucket should be empty
+    self.assertFalse(os.listdir(self.paths.bucket))
+    self.assertFalse(os.path.isfile(self.paths.config_license_sha1))
+    self.assertFalse(os.path.isfile(self.paths.config_zip_sha1))
+
+  def testDownload(self):
+    self.SetUpWorkdir(populate_bucket=True)
+
+    with _MockedInput('y'):
+      status = update.main([
+          'download',
+          '--dry-run',
+          '--bucket', self.paths.bucket,
+          '--config', self.paths.config_file,
+          '--sdk-root', self.paths.sdk_root,
+      ])
+
+    self.assertEqual(status, 0, 'the command should have succeeded.')
+
+    # sdk_root should contain zip contents, zip sha1, license
+    self.assertTrue(os.path.isfile(os.path.join(self.paths.gms_lib,
+                                                'dummy_file')))
+    self.assertTrue(os.path.isfile(self.paths.gms_root_sha1))
+    self.assertTrue(os.path.isfile(self.paths.gms_root_license))
+    self.assertEquals(_GetFileContent(self.paths.gms_root_license),
+                      self.DEFAULT_LICENSE)
+
+  def testDownloadBot(self):
+    self.SetUpWorkdir(populate_bucket=True, bot_env=True)
+
+    # No need to type 'y' on bots
+    status = update.main([
+        'download',
+        '--dry-run',
+        '--bucket', self.paths.bucket,
+        '--config', self.paths.config_file,
+        '--sdk-root', self.paths.sdk_root,
+    ])
+
+    self.assertEqual(status, 0, 'the command should have succeeded.')
+
+    # sdk_root should contain zip contents, zip sha1, license
+    self.assertTrue(os.path.isfile(os.path.join(self.paths.gms_lib,
+                                                'dummy_file')))
+    self.assertTrue(os.path.isfile(self.paths.gms_root_sha1))
+    self.assertTrue(os.path.isfile(self.paths.gms_root_license))
+    self.assertEquals(_GetFileContent(self.paths.gms_root_license),
+                      self.DEFAULT_LICENSE)
+
+  def testDownloadAlreadyUpToDate(self):
+    self.SetUpWorkdir(
+        populate_bucket=True,
+        existing_zip_sha1=self.DEFAULT_ZIP_SHA1)
+
+    status = update.main([
+        'download',
+        '--dry-run',
+        '--bucket', self.paths.bucket,
+        '--config', self.paths.config_file,
+        '--sdk-root', self.paths.sdk_root,
+    ])
+
+    self.assertEqual(status, 0, 'the command should have succeeded.')
+
+    # there should not be new files downloaded to sdk_root
+    self.assertFalse(os.path.isfile(os.path.join(self.paths.gms_lib,
+                                                 'dummy_file')))
+    self.assertFalse(os.path.isfile(self.paths.gms_root_license))
+
+  def testDownloadAcceptedLicense(self):
+    self.SetUpWorkdir(
+        populate_bucket=True,
+        existing_license=self.DEFAULT_LICENSE)
+
+    # License already accepted, no need to type
+    status = update.main([
+        'download',
+        '--dry-run',
+        '--bucket', self.paths.bucket,
+        '--config', self.paths.config_file,
+        '--sdk-root', self.paths.sdk_root,
+    ])
+
+    self.assertEqual(status, 0, 'the command should have succeeded.')
+
+    # sdk_root should contain zip contents, zip sha1, license
+    self.assertTrue(os.path.isfile(os.path.join(self.paths.gms_lib,
+                                                'dummy_file')))
+    self.assertTrue(os.path.isfile(self.paths.gms_root_sha1))
+    self.assertTrue(os.path.isfile(self.paths.gms_root_license))
+    self.assertEquals(_GetFileContent(self.paths.gms_root_license),
+                      self.DEFAULT_LICENSE)
+
+  def testDownloadNewLicense(self):
+    self.SetUpWorkdir(
+        populate_bucket=True,
+        existing_license='Old license')
+
+    with _MockedInput('y'):
+      status = update.main([
+          'download',
+          '--dry-run',
+          '--bucket', self.paths.bucket,
+          '--config', self.paths.config_file,
+          '--sdk-root', self.paths.sdk_root,
+      ])
+
+    self.assertEqual(status, 0, 'the command should have succeeded.')
+
+    # sdk_root should contain zip contents, zip sha1, NEW license
+    self.assertTrue(os.path.isfile(os.path.join(self.paths.gms_lib,
+                                                'dummy_file')))
+    self.assertTrue(os.path.isfile(self.paths.gms_root_sha1))
+    self.assertTrue(os.path.isfile(self.paths.gms_root_license))
+    self.assertEquals(_GetFileContent(self.paths.gms_root_license),
+                      self.DEFAULT_LICENSE)
+
+  def testDownloadRefusedLicense(self):
+    self.SetUpWorkdir(
+        populate_bucket=True,
+        existing_license='Old license')
+
+    with _MockedInput('n'):
+      status = update.main([
+          'download',
+          '--dry-run',
+          '--bucket', self.paths.bucket,
+          '--config', self.paths.config_file,
+          '--sdk-root', self.paths.sdk_root,
+      ])
+
+    self.assertEqual(status, 0, 'the command should have succeeded.')
+
+    # there should not be new files downloaded to sdk_root
+    self.assertFalse(os.path.isfile(os.path.join(self.paths.gms_lib,
+                                                 'dummy_file')))
+    self.assertEquals(_GetFileContent(self.paths.gms_root_license),
+                      'Old license')
+
+  def testDownloadNoAndroidSDK(self):
+    self.SetUpWorkdir(
+        populate_bucket=True,
+        existing_license='Old license')
+
+    non_existing_sdk_root = os.path.join(self.workdir, 'non_existing_sdk_root')
+    # Should not run, no typing needed
+    status = update.main([
+        'download',
+        '--dry-run',
+        '--bucket', self.paths.bucket,
+        '--config', self.paths.config_file,
+        '--sdk-root', non_existing_sdk_root,
+    ])
+
+    self.assertEqual(status, 0, 'the command should have succeeded.')
+    self.assertFalse(os.path.isdir(non_existing_sdk_root))
+
+  def SetUpWorkdir(self,
+                   bot_env=False,
+                   config_version=DEFAULT_CONFIG_VERSION,
+                   existing_license=None,
+                   existing_zip_sha1=None,
+                   gms_lib=False,
+                   populate_bucket=False,
+                   source_prop=None,
+                   xml_version=None):
+    '''Prepares workdir by putting it in the specified state.
+
+    Args:
+      - general
+        bot_env: sets or unsets CHROME_HEADLESS
+
+      - bucket
+        populate_bucket: boolean. Populate the bucket with a zip and license
+                         file. The sha1s will be copied to the config
+                         directory.
+
+      - config
+        config_version: number. Version of the current SDK. Defaults to
+                        `self.DEFAULT_CONFIG_VERSION`
+
+      - sdk_root
+        existing_license: string. Create a LICENSE file setting the specified
+                          text as content of the currently accepted license.
+        existing_zip_sha1: string. Create a sha1 file setting the specified
+                           hash as the hash of the SDK that is supposed to be
+                           installed.
+        gms_lib: boolean. Create a dummy file in the location of the play
+                 services SDK.
+        source_prop: boolean. Create a source.properties file that contains
+                     the license to upload.
+        xml_version: number. Create a version.xml file with the specified
+                     version; it is used when uploading.
+    '''
+    self.paths = Paths(self.workdir)
+
+    # Create the main directories
+    _MakeDirs(self.paths.sdk_root)
+    _MakeDirs(self.paths.config_dir)
+    _MakeDirs(self.paths.bucket)
+
+    # Overrides the directory where the sha1 files are looked up, since it
+    # is not configured via argument.
+    update.SHA1_DIRECTORY = self.paths.config_dir
+
+    os.environ['CHROME_HEADLESS'] = '1' if bot_env else ''
+
+    if config_version:
+      _MakeDirs(os.path.dirname(self.paths.config_file))
+      with open(self.paths.config_file, 'w') as stream:
+        stream.write(('{"version_number":%d,'
+                      '"version_xml_path": "res/values/version.xml"}'
+                      '\n') % config_version)
+
+    if existing_license:
+      _MakeDirs(self.paths.gms_root)
+      with open(self.paths.gms_root_license, 'w') as stream:
+        stream.write(existing_license)
+
+    if existing_zip_sha1:
+      _MakeDirs(self.paths.gms_root)
+      with open(self.paths.gms_root_sha1, 'w') as stream:
+        stream.write(existing_zip_sha1)
+
+    if gms_lib:
+      _MakeDirs(self.paths.gms_lib)
+      with open(os.path.join(self.paths.gms_lib, 'dummy_file'), 'w') as stream:
+        stream.write('foo\n')
+
+    if source_prop:
+      _MakeDirs(os.path.dirname(self.paths.source_prop))
+      with open(self.paths.source_prop, 'w') as stream:
+        stream.write('Foo=Bar\n'
+                     'Pkg.License=%s\n'
+                     'Baz=Fizz\n' % self.DEFAULT_LICENSE)
+
+    if populate_bucket:
+      _MakeDirs(self.paths.config_dir)
+      bucket_dir = os.path.join(self.paths.bucket, str(config_version))
+      _MakeDirs(bucket_dir)
+
+      # TODO(dgn): Should we use real sha1s? A comparison with the real sha1
+      # is done but does not do anything other than display a message.
+      config_license_sha1 = 'license0and0filling0to0forty0chars000000'
+      with open(self.paths.config_license_sha1, 'w') as stream:
+        stream.write(config_license_sha1)
+
+      with open(os.path.join(bucket_dir, config_license_sha1), 'w') as stream:
+        stream.write(self.DEFAULT_LICENSE)
+
+      config_zip_sha1 = self.DEFAULT_ZIP_SHA1
+      with open(self.paths.config_zip_sha1, 'w') as stream:
+        stream.write(config_zip_sha1)
+
+      pre_zip_lib = os.path.join(self.workdir, 'pre_zip_lib')
+      post_zip_lib = os.path.join(bucket_dir, config_zip_sha1)
+      _MakeDirs(pre_zip_lib)
+      with open(os.path.join(pre_zip_lib, 'dummy_file'), 'w') as stream:
+        stream.write('foo\n')
+      shutil.make_archive(post_zip_lib, 'zip', pre_zip_lib)
+      # make_archive appends .zip
+      shutil.move(post_zip_lib + '.zip', post_zip_lib)
+
+    if xml_version:
+      _MakeDirs(os.path.dirname(self.paths.xml_version))
+      with open(self.paths.xml_version, 'w') as stream:
+        stream.write(
+            '<?xml version="1.0" encoding="utf-8"?>\n'
+            '<resources>\n'
+            '    <integer name="google_play_services_version">%d</integer>\n'
+            '</resources>\n' % xml_version)
+
+
+class Paths(object):
+  '''Declaration of the paths commonly manipulated in the tests.'''
+
+  def __init__(self, workdir):
+    self.bucket = os.path.join(workdir, 'bucket')
+
+    self.config_dir = os.path.join(workdir, 'config')
+    self.config_file = os.path.join(self.config_dir, 'config.json')
+    self.config_license_sha1 = os.path.join(self.config_dir, 'LICENSE.sha1')
+    self.config_zip_sha1 = os.path.join(
+        self.config_dir,
+        'google_play_services_library.zip.sha1')
+
+    self.sdk_root = os.path.join(workdir, 'sdk_root')
+    self.gms_root = os.path.join(self.sdk_root, 'extras', 'google',
+                                 'google_play_services')
+    self.gms_root_sha1 = os.path.join(self.gms_root,
+                                      'google_play_services_library.zip.sha1')
+    self.gms_root_license = os.path.join(self.gms_root, 'LICENSE')
+    self.source_prop = os.path.join(self.gms_root, 'source.properties')
+    self.gms_lib = os.path.join(self.gms_root, 'libproject',
+                                'google-play-services_lib')
+    self.xml_version = os.path.join(self.gms_lib, 'res', 'values',
+                                    'version.xml')
+
+
+def _GetFileContent(file_path):
+  with open(file_path, 'r') as stream:
+    return stream.read()
+
+
+def _MakeDirs(path):
+  '''Avoids having to do the error handling everywhere.'''
+  if not os.path.exists(path):
+    os.makedirs(path)
+
+
+@contextlib.contextmanager
+def _MockedInput(typed_string):
+  '''Makes raw_input return |typed_string| while inside the context.'''
+  # Grab the original before entering the try block so the finally clause
+  # can always restore it.
+  original_raw_input = __builtins__.raw_input
+  try:
+    __builtins__.raw_input = lambda _: typed_string
+    yield
+  finally:
+    __builtins__.raw_input = original_raw_input
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/build/android/play_services/utils.py b/build/android/play_services/utils.py
new file mode 100644
index 0000000..acc6cf4
--- /dev/null
+++ b/build/android/play_services/utils.py
@@ -0,0 +1,170 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+'''
+Utility functions for manipulating Google Play services related files.
+'''
+
+import argparse
+import filecmp
+import json
+import logging
+import os
+import re
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
+from devil.utils import cmd_helper
+
+
+_XML_VERSION_NUMBER_PATTERN = re.compile(
+    r'<integer name="google_play_services_version">(\d+)<\/integer>')
+
+
+class DefaultsRawHelpFormatter(argparse.ArgumentDefaultsHelpFormatter,
+                               argparse.RawDescriptionHelpFormatter):
+  '''
+  Combines the features of RawDescriptionHelpFormatter and
+  ArgumentDefaultsHelpFormatter, providing defaults for the arguments and raw
+  text for the description.
+  '''
+  pass
+
+
+class ConfigParser(object):
+  '''Reads and writes configuration files for play services related scripts.
+
+  The configuration files are JSON files. Here is the data they are expected
+  to contain:
+
+   -  version_number
+      Number. Mirrors @integer/google_play_services_version from the library.
+      Example: 815000
+
+   -  sdk_version
+      Version of the Play Services SDK to retrieve, when preprocessing the
+      library from a maven/gradle repository.
+      Example: "8.1.0"
+
+   -  clients
+      List of strings. Name of the clients (or play services modules) to
+      include when preprocessing the library.
+      Example: ["play-services-base", "play-services-cast"]
+
+   -  version_xml_path
+      String. Path to the version.xml file describing the current version.
+      Should be relative to the library base directory.
+      Example: "res/values/version.xml"
+
+   -  locale_whitelist
+      List of strings. List of locales to keep from the resources. Can be
+      obtained by generating an android build and looking at the content of
+      `out/Debug/gen/chrome/java/res`; or looking at the android section in
+      `//chrome/app/generated_resources.grd`
+      Example: ["am", "ar", "bg", "ca", "cs"]
+
+   - resource_whitelist
+     List of strings. List of resource files to explicitly keep in the final
+     output. Use it to keep drawables, for example, as we currently remove
+     them all.
+     Example: ["play-services-base/res/drawables/foobar.xml"]
+  '''
+  _VERSION_NUMBER_KEY = 'version_number'
+
+  def __init__(self, path):
+    self.path = path
+    self._data = {}
+
+    with open(path, 'r') as stream:
+      self._data = json.load(stream)
+
+  @property
+  def version_number(self):
+    return self._data.get(self._VERSION_NUMBER_KEY)
+
+  @property
+  def sdk_version(self):
+    return self._data.get('sdk_version')
+
+  @property
+  def clients(self):
+    return self._data.get('clients') or []
+
+  @property
+  def version_xml_path(self):
+    return self._data.get('version_xml_path')
+
+  @property
+  def locale_whitelist(self):
+    return self._data.get('locale_whitelist') or []
+
+  @property
+  def resource_whitelist(self):
+    return self._data.get('resource_whitelist') or []
+
+  def UpdateVersionNumber(self, new_version_number):
+    '''Updates the version number and saves it in the configuration file.'''
+
+    with open(self.path, 'w') as stream:
+      self._data[self._VERSION_NUMBER_KEY] = new_version_number
+      stream.write(DumpTrimmedJson(self._data))
+
+
+def DumpTrimmedJson(json_data):
+  '''
+  Default formatting when dumping json to string has trailing spaces and lacks
+  a new line at the end. This function fixes that.
+  '''
+
+  out = json.dumps(json_data, sort_keys=True, indent=2)
+  out = out.replace(' ' + os.linesep, os.linesep)
+  return out + os.linesep
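+
+# For instance (Python 2 json module behaviour; illustrative input):
+#   json.dumps({'a': [1, 2]}, indent=2)
+# leaves a trailing space after '1,' at the end of its line; DumpTrimmedJson
+# strips those trailing spaces and appends a final newline.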
+
+
+def FileEquals(expected_file, actual_file):
+  '''
+  Returns whether the two files are equal. Returns False if any of the files
+  doesn't exist.
+  '''
+
+  if not os.path.isfile(actual_file) or not os.path.isfile(expected_file):
+    return False
+  return filecmp.cmp(expected_file, actual_file)
+
+
+def IsRepoDirty(repo_root):
+  '''Returns True if there are staged or modified files, False otherwise.'''
+
+  # diff-index returns 1 if there are staged changes or modified files,
+  # 0 otherwise.
+  cmd = ['git', 'diff-index', '--quiet', 'HEAD']
+  return cmd_helper.Call(cmd, cwd=repo_root) == 1
+
+
+def GetVersionNumberFromLibraryResources(version_xml):
+  '''
+  Extracts a Google Play services version number from its version.xml file.
+  '''
+
+  with open(version_xml, 'r') as version_file:
+    version_file_content = version_file.read()
+
+  match = _XML_VERSION_NUMBER_PATTERN.search(version_file_content)
+  if not match:
+    raise AttributeError('A value for google_play_services_version was not '
+                         'found in ' + version_xml)
+  return int(match.group(1))
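+
+# For reference, the part of version.xml matched above looks like (version
+# value illustrative):
+#   <integer name="google_play_services_version">811500</integer>
+# in which case this function returns 811500 as an int.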
+
+
+def MakeLocalCommit(repo_root, files_to_commit, message):
+  '''Makes a local git commit.'''
+
+  logging.debug('Staging files (%s) for commit.', files_to_commit)
+  if cmd_helper.Call(['git', 'add'] + files_to_commit, cwd=repo_root) != 0:
+    raise Exception('Failed to stage the files for the local commit.')
+
+  logging.debug('Committing.')
+  if cmd_helper.Call(['git', 'commit', '-m', message], cwd=repo_root) != 0:
+    raise Exception('The local commit failed.')
diff --git a/build/android/provision_devices.py b/build/android/provision_devices.py
new file mode 100755
index 0000000..11b2862
--- /dev/null
+++ b/build/android/provision_devices.py
@@ -0,0 +1,565 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provisions Android devices with settings required for bots.
+
+Usage:
+  ./provision_devices.py [-d <device serial number>]
+"""
+
+import argparse
+import datetime
+import json
+import logging
+import os
+import posixpath
+import re
+import subprocess
+import sys
+import time
+
+# Import _strptime before threaded code. datetime.datetime.strptime is
+# threadsafe except for the initial import of the _strptime module.
+# See crbug.com/584730 and https://bugs.python.org/issue7980.
+import _strptime  # pylint: disable=unused-import
+
+import devil_chromium
+from devil import devil_env
+from devil.android import battery_utils
+from devil.android import device_blacklist
+from devil.android import device_errors
+from devil.android import device_temp_file
+from devil.android import device_utils
+from devil.android.sdk import keyevent
+from devil.android.sdk import version_codes
+from devil.constants import exit_codes
+from devil.utils import run_tests_helper
+from devil.utils import timeout_retry
+from pylib import constants
+from pylib import device_settings
+from pylib.constants import host_paths
+
+_SYSTEM_WEBVIEW_PATHS = ['/system/app/webview', '/system/app/WebViewGoogle']
+_CHROME_PACKAGE_REGEX = re.compile('.*chrom.*')
+_TOMBSTONE_REGEX = re.compile('tombstone.*')
+
+
+class _DEFAULT_TIMEOUTS(object):
+  # L can take a while to reboot after a wipe.
+  LOLLIPOP = 600
+  PRE_LOLLIPOP = 180
+
+  HELP_TEXT = '{}s on L, {}s on pre-L'.format(LOLLIPOP, PRE_LOLLIPOP)
+
+
+class _PHASES(object):
+  WIPE = 'wipe'
+  PROPERTIES = 'properties'
+  FINISH = 'finish'
+
+  ALL = [WIPE, PROPERTIES, FINISH]
+
+
+def ProvisionDevices(args):
+  blacklist = (device_blacklist.Blacklist(args.blacklist_file)
+               if args.blacklist_file
+               else None)
+  devices = [d for d in device_utils.DeviceUtils.HealthyDevices(blacklist)
+             if not args.emulators or d.adb.is_emulator]
+  if args.device:
+    devices = [d for d in devices if d == args.device]
+  if not devices:
+    raise device_errors.DeviceUnreachableError(args.device)
+  parallel_devices = device_utils.DeviceUtils.parallel(devices)
+  if args.emulators:
+    parallel_devices.pMap(SetProperties, args)
+  else:
+    parallel_devices.pMap(ProvisionDevice, blacklist, args)
+  if args.auto_reconnect:
+    _LaunchHostHeartbeat()
+  blacklisted_devices = blacklist.Read() if blacklist else []
+  if args.output_device_blacklist:
+    with open(args.output_device_blacklist, 'w') as f:
+      json.dump(blacklisted_devices, f)
+  if all(d in blacklisted_devices for d in devices):
+    raise device_errors.NoDevicesError
+  return 0
+
+
+def ProvisionDevice(device, blacklist, options):
+  if options.reboot_timeout:
+    reboot_timeout = options.reboot_timeout
+  elif device.build_version_sdk >= version_codes.LOLLIPOP:
+    reboot_timeout = _DEFAULT_TIMEOUTS.LOLLIPOP
+  else:
+    reboot_timeout = _DEFAULT_TIMEOUTS.PRE_LOLLIPOP
+
+  def should_run_phase(phase_name):
+    return not options.phases or phase_name in options.phases
+
+  def run_phase(phase_func, reboot=True):
+    try:
+      device.WaitUntilFullyBooted(timeout=reboot_timeout, retries=0)
+    except device_errors.CommandTimeoutError:
+      logging.error('Device did not finish booting. Will try to reboot.')
+      device.Reboot(timeout=reboot_timeout)
+    phase_func(device, options)
+    if reboot:
+      device.Reboot(False, retries=0)
+      device.adb.WaitForDevice()
+
+  try:
+    if should_run_phase(_PHASES.WIPE):
+      if (options.chrome_specific_wipe or device.IsUserBuild() or
+          device.build_version_sdk >= version_codes.MARSHMALLOW):
+        run_phase(WipeChromeData)
+      else:
+        run_phase(WipeDevice)
+
+    if should_run_phase(_PHASES.PROPERTIES):
+      run_phase(SetProperties)
+
+    if should_run_phase(_PHASES.FINISH):
+      run_phase(FinishProvisioning, reboot=False)
+
+    if options.chrome_specific_wipe:
+      package = "com.google.android.gms"
+      version_name = device.GetApplicationVersion(package)
+      logging.info("Version name for %s is %s", package, version_name)
+
+    CheckExternalStorage(device)
+
+  except device_errors.CommandTimeoutError:
+    logging.exception('Timed out waiting for device %s. Adding to blacklist.',
+                      str(device))
+    if blacklist:
+      blacklist.Extend([str(device)], reason='provision_timeout')
+
+  except device_errors.CommandFailedError:
+    logging.exception('Failed to provision device %s. Adding to blacklist.',
+                      str(device))
+    if blacklist:
+      blacklist.Extend([str(device)], reason='provision_failure')
+
+
+def CheckExternalStorage(device):
+  """Checks that storage is writable and if not makes it writable.
+
+  Arguments:
+    device: The device to check.
+  """
+  try:
+    with device_temp_file.DeviceTempFile(
+        device.adb, suffix='.sh', dir=device.GetExternalStoragePath()) as f:
+      device.WriteFile(f.name, 'test')
+  except device_errors.CommandFailedError:
+    logging.info('External storage not writable. Remounting / as RW')
+    device.RunShellCommand(['mount', '-o', 'remount,rw', '/'],
+                           check_return=True, as_root=True)
+    device.EnableRoot()
+    with device_temp_file.DeviceTempFile(
+        device.adb, suffix='.sh', dir=device.GetExternalStoragePath()) as f:
+      device.WriteFile(f.name, 'test')
+
+
+def WipeChromeData(device, options):
+  """Wipes chrome specific data from device
+
+  (1) uninstall any app whose name matches *chrom*, except
+      com.android.chrome, which is the chrome stable package. Doing so also
+      removes the corresponding dirs under /data/data/ and /data/app/
+  (2) remove any dir under /data/app-lib/ whose name matches *chrom*
+  (3) remove any files under /data/tombstones/ whose name matches "tombstone*"
+  (4) remove /data/local.prop if there is any
+  (5) remove /data/local/chrome-command-line if there is any
+  (6) remove anything under /data/local/.config/ if the dir exists
+      (this is telemetry related)
+  (7) remove anything under /data/local/tmp/
+
+  Arguments:
+    device: the device to wipe
+  """
+  if options.skip_wipe:
+    return
+
+  try:
+    if device.IsUserBuild():
+      _UninstallIfMatch(device, _CHROME_PACKAGE_REGEX,
+                        constants.PACKAGE_INFO['chrome_stable'].package)
+      device.RunShellCommand('rm -rf %s/*' % device.GetExternalStoragePath(),
+                             check_return=True)
+      device.RunShellCommand('rm -rf /data/local/tmp/*', check_return=True)
+    else:
+      device.EnableRoot()
+      _UninstallIfMatch(device, _CHROME_PACKAGE_REGEX,
+                        constants.PACKAGE_INFO['chrome_stable'].package)
+      _WipeUnderDirIfMatch(device, '/data/app-lib/', _CHROME_PACKAGE_REGEX)
+      _WipeUnderDirIfMatch(device, '/data/tombstones/', _TOMBSTONE_REGEX)
+
+      _WipeFileOrDir(device, '/data/local.prop')
+      _WipeFileOrDir(device, '/data/local/chrome-command-line')
+      _WipeFileOrDir(device, '/data/local/.config/')
+      _WipeFileOrDir(device, '/data/local/tmp/')
+      device.RunShellCommand('rm -rf %s/*' % device.GetExternalStoragePath(),
+                             check_return=True)
+  except device_errors.CommandFailedError:
+    logging.exception('Possible failure while wiping the device. '
+                      'Attempting to continue.')
+
+
+def WipeDevice(device, options):
+  """Wipes data from device, keeping only the adb_keys for authorization.
+
+  After wiping data on a device that has been authorized, adb can still
+  communicate with the device, but after reboot the device will need to be
+  re-authorized because the adb keys file is stored in /data/misc/adb/.
+  Thus, the adb_keys file is rewritten so the device does not need to be
+  re-authorized.
+
+  Arguments:
+    device: the device to wipe
+  """
+  if options.skip_wipe:
+    return
+
+  try:
+    device.EnableRoot()
+    device_authorized = device.FileExists(constants.ADB_KEYS_FILE)
+    if device_authorized:
+      adb_keys = device.ReadFile(constants.ADB_KEYS_FILE,
+                                 as_root=True).splitlines()
+    device.RunShellCommand(['wipe', 'data'],
+                           as_root=True, check_return=True)
+    device.adb.WaitForDevice()
+
+    if device_authorized:
+      adb_keys_set = set(adb_keys)
+      for adb_key_file in options.adb_key_files or []:
+        try:
+          with open(adb_key_file, 'r') as f:
+            adb_public_keys = f.readlines()
+          adb_keys_set.update(adb_public_keys)
+        except IOError:
+          logging.warning('Unable to find adb keys file %s.', adb_key_file)
+      _WriteAdbKeysFile(device, '\n'.join(adb_keys_set))
+  except device_errors.CommandFailedError:
+    logging.exception('Possible failure while wiping the device. '
+                      'Attempting to continue.')
+
+
+def _WriteAdbKeysFile(device, adb_keys_string):
+  dir_path = posixpath.dirname(constants.ADB_KEYS_FILE)
+  device.RunShellCommand(['mkdir', '-p', dir_path],
+                         as_root=True, check_return=True)
+  device.RunShellCommand(['restorecon', dir_path],
+                         as_root=True, check_return=True)
+  device.WriteFile(constants.ADB_KEYS_FILE, adb_keys_string, as_root=True)
+  device.RunShellCommand(['restorecon', constants.ADB_KEYS_FILE],
+                         as_root=True, check_return=True)
+
+
+def SetProperties(device, options):
+  try:
+    device.EnableRoot()
+  except device_errors.CommandFailedError as e:
+    logging.warning(str(e))
+
+  if not device.IsUserBuild():
+    _ConfigureLocalProperties(device, options.enable_java_debug)
+  else:
+    logging.warning('Cannot configure properties in user builds.')
+  device_settings.ConfigureContentSettings(
+      device, device_settings.DETERMINISTIC_DEVICE_SETTINGS)
+  if options.disable_location:
+    device_settings.ConfigureContentSettings(
+        device, device_settings.DISABLE_LOCATION_SETTINGS)
+  else:
+    device_settings.ConfigureContentSettings(
+        device, device_settings.ENABLE_LOCATION_SETTINGS)
+
+  if options.disable_mock_location:
+    device_settings.ConfigureContentSettings(
+        device, device_settings.DISABLE_MOCK_LOCATION_SETTINGS)
+  else:
+    device_settings.ConfigureContentSettings(
+        device, device_settings.ENABLE_MOCK_LOCATION_SETTINGS)
+
+  device_settings.SetLockScreenSettings(device)
+  if options.disable_network:
+    device_settings.ConfigureContentSettings(
+        device, device_settings.NETWORK_DISABLED_SETTINGS)
+
+  if options.disable_system_chrome:
+    # The system chrome version on the device interferes with some tests.
+    device.RunShellCommand(['pm', 'disable', 'com.android.chrome'],
+                           check_return=True)
+
+  if options.remove_system_webview:
+    if any(device.PathExists(p) for p in _SYSTEM_WEBVIEW_PATHS):
+      logging.info('System WebView exists and needs to be removed')
+      if device.HasRoot():
+        # Disable Marshmallow's Verity security feature.
+        if device.build_version_sdk >= version_codes.MARSHMALLOW:
+          device.adb.DisableVerity()
+          device.Reboot()
+          device.WaitUntilFullyBooted()
+          device.EnableRoot()
+
+        # This is required, e.g., to replace the system webview on a device.
+        device.adb.Remount()
+        device.RunShellCommand(['stop'], check_return=True)
+        device.RunShellCommand(['rm', '-rf'] + _SYSTEM_WEBVIEW_PATHS,
+                               check_return=True)
+        device.RunShellCommand(['start'], check_return=True)
+      else:
+        logging.warning('Cannot remove system webview from a non-rooted device')
+    else:
+      logging.info('System WebView already removed')
+
+  # Some device types can momentarily disappear after setting properties.
+  device.adb.WaitForDevice()
+
+
+def _ConfigureLocalProperties(device, java_debug=True):
+  """Set standard readonly testing device properties prior to reboot."""
+  local_props = [
+      'persist.sys.usb.config=adb',
+      'ro.monkey=1',
+      'ro.test_harness=1',
+      'ro.audio.silent=1',
+      'ro.setupwizard.mode=DISABLED',
+      ]
+  if java_debug:
+    local_props.append(
+        '%s=all' % device_utils.DeviceUtils.JAVA_ASSERT_PROPERTY)
+    local_props.append('debug.checkjni=1')
+  try:
+    device.WriteFile(
+        device.LOCAL_PROPERTIES_PATH,
+        '\n'.join(local_props), as_root=True)
+    # Android will not respect the local props file if it is world writable.
+    device.RunShellCommand(
+        ['chmod', '644', device.LOCAL_PROPERTIES_PATH],
+        as_root=True, check_return=True)
+  except device_errors.CommandFailedError:
+    logging.exception('Failed to configure local properties.')
+
+
+def FinishProvisioning(device, options):
+  # The lockscreen can't be disabled on user builds, so send a keyevent
+  # to unlock it.
+  if device.IsUserBuild():
+    device.SendKeyEvent(keyevent.KEYCODE_MENU)
+
+  if options.min_battery_level is not None:
+    battery = battery_utils.BatteryUtils(device)
+    try:
+      battery.ChargeDeviceToLevel(options.min_battery_level)
+    except device_errors.DeviceChargingError:
+      device.Reboot()
+      battery.ChargeDeviceToLevel(options.min_battery_level)
+
+  if options.max_battery_temp is not None:
+    try:
+      battery = battery_utils.BatteryUtils(device)
+      battery.LetBatteryCoolToTemperature(options.max_battery_temp)
+    except device_errors.CommandFailedError:
+      logging.exception('Unable to let battery cool to specified temperature.')
+
+  def _set_and_verify_date():
+    if device.build_version_sdk >= version_codes.MARSHMALLOW:
+      date_format = '%m%d%H%M%Y.%S'
+      set_date_command = ['date', '-u']
+      get_date_command = ['date', '-u']
+    else:
+      date_format = '%Y%m%d.%H%M%S'
+      set_date_command = ['date', '-s']
+      get_date_command = ['date']
+
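+    # e.g. time.strftime(date_format, time.gmtime()) yields '060712302016.00'
+    # on M+ and '20160607.123000' before M (illustrative timestamps).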
+    # TODO(jbudorick): This is wrong on pre-M devices -- get/set are
+    # dealing in local time, but we're setting based on GMT.
+    strgmtime = time.strftime(date_format, time.gmtime())
+    set_date_command.append(strgmtime)
+    device.RunShellCommand(set_date_command, as_root=True, check_return=True)
+
+    get_date_command.append('+"%Y%m%d.%H%M%S"')
+    device_time = device.RunShellCommand(
+        get_date_command, as_root=True, single_line=True).replace('"', '')
+    device_time = datetime.datetime.strptime(device_time, "%Y%m%d.%H%M%S")
+    correct_time = datetime.datetime.strptime(strgmtime, date_format)
+    tdelta = (correct_time - device_time).seconds
+    if tdelta <= 1:
+      logging.info('Date/time successfully set on %s', device)
+      return True
+    else:
+      logging.error('Date mismatch. Device: %s Correct: %s',
+                    device_time.isoformat(), correct_time.isoformat())
+      return False
+
+  # Sometimes the date is not set correctly on the devices. Retry on failure.
+  if device.IsUserBuild():
+    # TODO(bpastene): Figure out how to set the date & time on user builds.
+    pass
+  else:
+    if not timeout_retry.WaitFor(
+        _set_and_verify_date, wait_period=1, max_tries=2):
+      raise device_errors.CommandFailedError(
+          'Failed to set date & time.', device_serial=str(device))
+
+  props = device.RunShellCommand('getprop', check_return=True)
+  for prop in props:
+    logging.info('  %s', prop)
+  if options.auto_reconnect:
+    _PushAndLaunchAdbReboot(device, options.target)
+
+
+def _UninstallIfMatch(device, pattern, app_to_keep):
+  installed_packages = device.RunShellCommand(['pm', 'list', 'packages'])
+  installed_system_packages = [
+      pkg.split(':')[1] for pkg in device.RunShellCommand(['pm', 'list',
+                                                           'packages', '-s'])]
+  for package_output in installed_packages:
+    package = package_output.split(":")[1]
+    if pattern.match(package) and not package == app_to_keep:
+      if not device.IsUserBuild() or package not in installed_system_packages:
+        device.Uninstall(package)
+
+
+def _WipeUnderDirIfMatch(device, path, pattern):
+  ls_result = device.Ls(path)
+  for (content, _) in ls_result:
+    if pattern.match(content):
+      _WipeFileOrDir(device, path + content)
+
+
+def _WipeFileOrDir(device, path):
+  if device.PathExists(path):
+    device.RunShellCommand(['rm', '-rf', path], check_return=True)
+
+
+def _PushAndLaunchAdbReboot(device, target):
+  """Pushes and launches the adb_reboot binary on the device.
+
+  Arguments:
+    device: The DeviceUtils instance for the device to which the adb_reboot
+            binary should be pushed.
+    target: The build target (e.g. Debug or Release), which helps locate
+            the adb_reboot binary.
+  """
+  logging.info('Will push and launch adb_reboot on %s', str(device))
+  # Kill if adb_reboot is already running.
+  device.KillAll('adb_reboot', blocking=True, timeout=2, quiet=True)
+  # Push adb_reboot
+  logging.info('  Pushing adb_reboot ...')
+  adb_reboot = os.path.join(host_paths.DIR_SOURCE_ROOT,
+                            'out/%s/adb_reboot' % target)
+  device.PushChangedFiles([(adb_reboot, '/data/local/tmp/')])
+  # Launch adb_reboot
+  logging.info('  Launching adb_reboot ...')
+  device.RunShellCommand(
+      ['/data/local/tmp/adb_reboot'],
+      check_return=True)
+
+
+def _LaunchHostHeartbeat():
+  # Kill any existing host_heartbeat.
+  KillHostHeartbeat()
+  # Launch a new host_heartbeat
+  logging.info('Spawning host heartbeat...')
+  subprocess.Popen([os.path.join(host_paths.DIR_SOURCE_ROOT,
+                                 'build/android/host_heartbeat.py')])
+
+
+def KillHostHeartbeat():
+  ps = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
+  stdout, _ = ps.communicate()
+  matches = re.findall('\\n.*host_heartbeat.*', stdout)
+  for match in matches:
+    logging.info('An instance of host_heartbeat is running; killing it.')
+    pid = re.findall(r'(\S+)', match)[1]
+    subprocess.call(['kill', str(pid)])
+
+
+def main():
+  # Recommended options on perf bots:
+  # --disable-network
+  #     TODO(tonyg): We eventually want network on. However, currently radios
+  #     can cause perfbots to drain faster than they charge.
+  # --min-battery-level 95
+  #     Some perf bots run benchmarks with USB charging disabled which leads
+  #     to gradual draining of the battery. We must wait for a full charge
+  #     before starting a run in order to keep the devices online.
+
+  parser = argparse.ArgumentParser(
+      description='Provision Android devices with settings required for bots.')
+  parser.add_argument('-d', '--device', metavar='SERIAL',
+                      help='the serial number of the device to be provisioned'
+                      ' (the default is to provision all devices attached)')
+  parser.add_argument('--adb-path',
+                      help='Absolute path to the adb binary to use.')
+  parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
+  parser.add_argument('--phase', action='append', choices=_PHASES.ALL,
+                      dest='phases',
+                      help='Phases of provisioning to run. '
+                           '(If omitted, all phases will be run.)')
+  parser.add_argument('--skip-wipe', action='store_true', default=False,
+                      help="don't wipe device data during provisioning")
+  parser.add_argument('--reboot-timeout', metavar='SECS', type=int,
+                      help='when wiping the device, max number of seconds to'
+                      ' wait after each reboot '
+                      '(default: %s)' % _DEFAULT_TIMEOUTS.HELP_TEXT)
+  parser.add_argument('--min-battery-level', type=int, metavar='NUM',
+                      help='wait for the device to reach this minimum battery'
+                      ' level before trying to continue')
+  parser.add_argument('--disable-location', action='store_true',
+                      help='disable Google location services on devices')
+  parser.add_argument('--disable-mock-location', action='store_true',
+                      default=False, help='Set ALLOW_MOCK_LOCATION to false')
+  parser.add_argument('--disable-network', action='store_true',
+                      help='disable network access on devices')
+  parser.add_argument('--disable-java-debug', action='store_false',
+                      dest='enable_java_debug', default=True,
+                      help='disable Java property asserts and JNI checking')
+  parser.add_argument('--disable-system-chrome', action='store_true',
+                      help='Disable the system chrome from devices.')
+  parser.add_argument('--remove-system-webview', action='store_true',
+                      help='Remove the system webview from devices.')
+  parser.add_argument('-t', '--target', default='Debug',
+                      help='the build target (default: %(default)s)')
+  parser.add_argument('-r', '--auto-reconnect', action='store_true',
+                      help='push binary which will reboot the device on adb'
+                      ' disconnections')
+  parser.add_argument('--adb-key-files', type=str, nargs='+',
+                      help='list of adb keys to push to device')
+  parser.add_argument('-v', '--verbose', action='count', default=1,
+                      help='Log more information.')
+  parser.add_argument('--max-battery-temp', type=int, metavar='NUM',
+                      help='Wait for the battery to have this temp or lower.')
+  parser.add_argument('--output-device-blacklist',
+                      help='Json file to output the device blacklist.')
+  parser.add_argument('--chrome-specific-wipe', action='store_true',
+                      help='only wipe chrome specific data during provisioning')
+  parser.add_argument('--emulators', action='store_true',
+                      help='provision only emulators and ignore usb devices')
+  args = parser.parse_args()
+  constants.SetBuildType(args.target)
+
+  run_tests_helper.SetLogLevel(args.verbose)
+
+  devil_custom_deps = None
+  if args.adb_path:
+    devil_custom_deps = {
+      'adb': {
+        devil_env.GetPlatform(): [args.adb_path],
+      },
+    }
+
+  devil_chromium.Initialize(custom_deps=devil_custom_deps)
+
+  try:
+    return ProvisionDevices(args)
+  except (device_errors.DeviceUnreachableError, device_errors.NoDevicesError):
+    return exit_codes.INFRA
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/push_libraries.gypi b/build/android/push_libraries.gypi
new file mode 100644
index 0000000..8bce798
--- /dev/null
+++ b/build/android/push_libraries.gypi
@@ -0,0 +1,49 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide a rule that
+# pushes stripped shared libraries to the attached Android device. This should
+# only be used with the gyp_managed_install flag set.
+#
+# To use this, create a gyp target with the following form:
+#  {
+#    'actions': [
+#      'variables': {
+#        'ordered_libraries_file': 'file generated by write_ordered_libraries'
+#        'strip_stamp': 'stamp from strip action to block on'
+#        'libraries_source_dir': 'location where stripped libraries are stored'
+#        'device_library_dir': 'location on the device where pushed libraries are put',
+#        'push_stamp': 'file to touch when the action is complete'
+#        'configuration_name': 'The build CONFIGURATION_NAME'
+#      },
+#      'includes': [ '../../build/android/push_libraries.gypi' ],
+#    ],
+#  },
+#
+
+{
+  'action_name': 'push_libraries_<(_target_name)',
+  'message': 'Pushing libraries to device for <(_target_name)',
+  'inputs': [
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(DEPTH)/build/android/gyp/util/md5_check.py',
+    '<(DEPTH)/build/android/gyp/push_libraries.py',
+    '<(strip_stamp)',
+    '<(strip_additional_stamp)',
+    '<(build_device_config_path)',
+    '<(pack_relocations_stamp)',
+  ],
+  'outputs': [
+    '<(push_stamp)',
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/push_libraries.py',
+    '--build-device-configuration=<(build_device_config_path)',
+    '--libraries-dir=<(libraries_source_dir)',
+    '--device-dir=<(device_library_dir)',
+    '--libraries=@FileArg(<(ordered_libraries_file):libraries)',
+    '--stamp=<(push_stamp)',
+    '--output-directory=<(PRODUCT_DIR)',
+  ],
+}
diff --git a/build/android/pylib/OWNERS b/build/android/pylib/OWNERS
new file mode 100644
index 0000000..dbbbba7
--- /dev/null
+++ b/build/android/pylib/OWNERS
@@ -0,0 +1,4 @@
+jbudorick@chromium.org
+klundberg@chromium.org
+navabi@chromium.org
+skyostil@chromium.org
diff --git a/build/android/pylib/__init__.py b/build/android/pylib/__init__.py
new file mode 100644
index 0000000..16ee312
--- /dev/null
+++ b/build/android/pylib/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+_DEVIL_PATH = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), '..', '..', '..', 'third_party', 'catapult',
+    'devil'))
+
+if _DEVIL_PATH not in sys.path:
+  sys.path.append(_DEVIL_PATH)
diff --git a/build/android/pylib/base/__init__.py b/build/android/pylib/base/__init__.py
new file mode 100644
index 0000000..727e987
--- /dev/null
+++ b/build/android/pylib/base/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
diff --git a/build/android/pylib/base/base_test_result.py b/build/android/pylib/base/base_test_result.py
new file mode 100644
index 0000000..af4b71c
--- /dev/null
+++ b/build/android/pylib/base/base_test_result.py
@@ -0,0 +1,228 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module containing base test results classes."""
+
+import threading
+
+
+class ResultType(object):
+  """Class enumerating test types."""
+  PASS = 'PASS'
+  SKIP = 'SKIP'
+  FAIL = 'FAIL'
+  CRASH = 'CRASH'
+  TIMEOUT = 'TIMEOUT'
+  UNKNOWN = 'UNKNOWN'
+
+  @staticmethod
+  def GetTypes():
+    """Get a list of all test types."""
+    return [ResultType.PASS, ResultType.SKIP, ResultType.FAIL,
+            ResultType.CRASH, ResultType.TIMEOUT, ResultType.UNKNOWN]
+
+
+class BaseTestResult(object):
+  """Base class for a single test result."""
+
+  def __init__(self, name, test_type, duration=0, log=''):
+    """Construct a BaseTestResult.
+
+    Args:
+      name: Name of the test which defines uniqueness.
+      test_type: Type of the test result as defined in ResultType.
+      duration: Time it took for the test to run in milliseconds.
+      log: An optional string listing any errors.
+    """
+    assert name
+    assert test_type in ResultType.GetTypes()
+    self._name = name
+    self._test_type = test_type
+    self._duration = duration
+    self._log = log
+
+  def __str__(self):
+    return self._name
+
+  def __repr__(self):
+    return self._name
+
+  def __cmp__(self, other):
+    # pylint: disable=W0212
+    return cmp(self._name, other._name)
+
+  def __hash__(self):
+    return hash(self._name)
+
+  def SetName(self, name):
+    """Set the test name.
+
+    Because we're putting this into a set, this should only be used if moving
+    this test result into another set.
+    """
+    self._name = name
+
+  def GetName(self):
+    """Get the test name."""
+    return self._name
+
+  def SetType(self, test_type):
+    """Set the test result type."""
+    assert test_type in ResultType.GetTypes()
+    self._test_type = test_type
+
+  def GetType(self):
+    """Get the test result type."""
+    return self._test_type
+
+  def GetDuration(self):
+    """Get the test duration."""
+    return self._duration
+
+  def SetLog(self, log):
+    """Set the test log."""
+    self._log = log
+
+  def GetLog(self):
+    """Get the test log."""
+    return self._log
+
+
+class TestRunResults(object):
+  """Set of results for a test run."""
+
+  def __init__(self):
+    self._results = set()
+    self._results_lock = threading.RLock()
+
+  def GetLogs(self):
+    """Get the string representation of all test logs."""
+    with self._results_lock:
+      s = []
+      for test_type in ResultType.GetTypes():
+        if test_type != ResultType.PASS:
+          for t in sorted(self._GetType(test_type)):
+            log = t.GetLog()
+            if log:
+              s.append('[%s] %s:' % (test_type, t))
+              s.append(log)
+      return '\n'.join(s)
+
+  def GetGtestForm(self):
+    """Get the gtest string representation of this object."""
+    with self._results_lock:
+      s = []
+      plural = lambda n, s, p: '%d %s' % (n, p if n != 1 else s)
+      tests = lambda n: plural(n, 'test', 'tests')
+
+      s.append('[==========] %s ran.' % (tests(len(self.GetAll()))))
+      s.append('[  PASSED  ] %s.' % (tests(len(self.GetPass()))))
+
+      skipped = self.GetSkip()
+      if skipped:
+        s.append('[  SKIPPED ] Skipped %s, listed below:' % tests(len(skipped)))
+        for t in sorted(skipped):
+          s.append('[  SKIPPED ] %s' % str(t))
+
+      all_failures = self.GetFail().union(self.GetCrash(), self.GetTimeout(),
+          self.GetUnknown())
+      if all_failures:
+        s.append('[  FAILED  ] %s, listed below:' % tests(len(all_failures)))
+        for t in sorted(self.GetFail()):
+          s.append('[  FAILED  ] %s' % str(t))
+        for t in sorted(self.GetCrash()):
+          s.append('[  FAILED  ] %s (CRASHED)' % str(t))
+        for t in sorted(self.GetTimeout()):
+          s.append('[  FAILED  ] %s (TIMEOUT)' % str(t))
+        for t in sorted(self.GetUnknown()):
+          s.append('[  FAILED  ] %s (UNKNOWN)' % str(t))
+        s.append('')
+        s.append(plural(len(all_failures), 'FAILED TEST', 'FAILED TESTS'))
+      return '\n'.join(s)
+
+  def GetShortForm(self):
+    """Get the short string representation of this object."""
+    with self._results_lock:
+      s = []
+      s.append('ALL: %d' % len(self._results))
+      for test_type in ResultType.GetTypes():
+        s.append('%s: %d' % (test_type, len(self._GetType(test_type))))
+      return ''.join([x.ljust(15) for x in s])
+
+  def __str__(self):
+    return self.GetGtestForm()
+
+  def AddResult(self, result):
+    """Add |result| to the set.
+
+    Args:
+      result: An instance of BaseTestResult.
+    """
+    assert isinstance(result, BaseTestResult)
+    with self._results_lock:
+      self._results.add(result)
+
+  def AddResults(self, results):
+    """Add |results| to the set.
+
+    Args:
+      results: An iterable of BaseTestResult objects.
+    """
+    with self._results_lock:
+      for t in results:
+        self.AddResult(t)
+
+  def AddTestRunResults(self, results):
+    """Add the set of test results from |results|.
+
+    Args:
+      results: An instance of TestRunResults.
+    """
+    assert isinstance(results, TestRunResults)
+    with self._results_lock:
+      # pylint: disable=W0212
+      self._results.update(results._results)
+
+  def GetAll(self):
+    """Get the set of all test results."""
+    with self._results_lock:
+      return self._results.copy()
+
+  def _GetType(self, test_type):
+    """Get the set of test results with the given test type."""
+    with self._results_lock:
+      return set(t for t in self._results if t.GetType() == test_type)
+
+  def GetPass(self):
+    """Get the set of all passed test results."""
+    return self._GetType(ResultType.PASS)
+
+  def GetSkip(self):
+    """Get the set of all skipped test results."""
+    return self._GetType(ResultType.SKIP)
+
+  def GetFail(self):
+    """Get the set of all failed test results."""
+    return self._GetType(ResultType.FAIL)
+
+  def GetCrash(self):
+    """Get the set of all crashed test results."""
+    return self._GetType(ResultType.CRASH)
+
+  def GetTimeout(self):
+    """Get the set of all timed out test results."""
+    return self._GetType(ResultType.TIMEOUT)
+
+  def GetUnknown(self):
+    """Get the set of all unknown test results."""
+    return self._GetType(ResultType.UNKNOWN)
+
+  def GetNotPass(self):
+    """Get the set of all non-passed test results."""
+    return self.GetAll() - self.GetPass()
+
+  def DidRunPass(self):
+    """Return whether the test run was successful."""
+    return not self.GetNotPass() - self.GetSkip()
+
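+# Illustrative usage sketch (not part of the original change), using only the
+# classes defined above:
+#   results = TestRunResults()
+#   results.AddResult(BaseTestResult('Foo.testBar', ResultType.PASS))
+#   results.AddResult(
+#       BaseTestResult('Foo.testBaz', ResultType.FAIL, log='assert failed'))
+#   print results.GetGtestForm()  # gtest-style summary of the run
+#   print results.DidRunPass()    # False: one test failed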
diff --git a/build/android/pylib/base/base_test_result_unittest.py b/build/android/pylib/base/base_test_result_unittest.py
new file mode 100644
index 0000000..6f0cba7
--- /dev/null
+++ b/build/android/pylib/base/base_test_result_unittest.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unittests for TestRunResults."""
+
+import unittest
+
+from pylib.base.base_test_result import BaseTestResult
+from pylib.base.base_test_result import TestRunResults
+from pylib.base.base_test_result import ResultType
+
+
+class TestTestRunResults(unittest.TestCase):
+  def setUp(self):
+    self.p1 = BaseTestResult('p1', ResultType.PASS, log='pass1')
+    other_p1 = BaseTestResult('p1', ResultType.PASS)
+    self.p2 = BaseTestResult('p2', ResultType.PASS)
+    self.f1 = BaseTestResult('f1', ResultType.FAIL, log='failure1')
+    self.c1 = BaseTestResult('c1', ResultType.CRASH, log='crash1')
+    self.u1 = BaseTestResult('u1', ResultType.UNKNOWN)
+    self.tr = TestRunResults()
+    self.tr.AddResult(self.p1)
+    self.tr.AddResult(other_p1)
+    self.tr.AddResult(self.p2)
+    self.tr.AddResults(set([self.f1, self.c1, self.u1]))
+
+  def testGetAll(self):
+    self.assertFalse(
+        self.tr.GetAll().symmetric_difference(
+            [self.p1, self.p2, self.f1, self.c1, self.u1]))
+
+  def testGetPass(self):
+    self.assertFalse(self.tr.GetPass().symmetric_difference(
+        [self.p1, self.p2]))
+
+  def testGetNotPass(self):
+    self.assertFalse(self.tr.GetNotPass().symmetric_difference(
+        [self.f1, self.c1, self.u1]))
+
+  def testGetAddTestRunResults(self):
+    tr2 = TestRunResults()
+    other_p1 = BaseTestResult('p1', ResultType.PASS)
+    f2 = BaseTestResult('f2', ResultType.FAIL)
+    tr2.AddResult(other_p1)
+    tr2.AddResult(f2)
+    tr2.AddTestRunResults(self.tr)
+    self.assertFalse(
+        tr2.GetAll().symmetric_difference(
+            [self.p1, self.p2, self.f1, self.c1, self.u1, f2]))
+
+  def testGetLogs(self):
+    log_print = ('[FAIL] f1:\n'
+                 'failure1\n'
+                 '[CRASH] c1:\n'
+                 'crash1')
+    self.assertEqual(self.tr.GetLogs(), log_print)
+
+  def testGetShortForm(self):
+    short_print = ('ALL: 5         PASS: 2        FAIL: 1        '
+                   'CRASH: 1       TIMEOUT: 0     UNKNOWN: 1     ')
+    self.assertEqual(self.tr.GetShortForm(), short_print)
+
+  def testGetGtestForm(self):
+    gtest_print = ('[==========] 5 tests ran.\n'
+                   '[  PASSED  ] 2 tests.\n'
+                   '[  FAILED  ] 3 tests, listed below:\n'
+                   '[  FAILED  ] f1\n'
+                   '[  FAILED  ] c1 (CRASHED)\n'
+                   '[  FAILED  ] u1 (UNKNOWN)\n'
+                   '\n'
+                   '3 FAILED TESTS')
+    self.assertEqual(gtest_print, self.tr.GetGtestForm())
+
+  def testRunPassed(self):
+    self.assertFalse(self.tr.DidRunPass())
+    tr2 = TestRunResults()
+    self.assertTrue(tr2.DidRunPass())
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/build/android/pylib/base/base_test_runner.py b/build/android/pylib/base/base_test_runner.py
new file mode 100644
index 0000000..77d05f7
--- /dev/null
+++ b/build/android/pylib/base/base_test_runner.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Base class for running tests on a single device."""
+
+# TODO(jbudorick) Deprecate and remove this class and all subclasses after
+# any relevant parts have been ported to the new environment + test instance
+# model.
+
+import logging
+
+from devil.android import device_utils
+from devil.android import forwarder
+from devil.android import ports
+from pylib.valgrind_tools import CreateTool
+# TODO(frankf): Move this to pylib/utils
+
+
+# A file on device to store ports of net test server. The format of the file is
+# test-spawner-server-port:test-server-port
+NET_TEST_SERVER_PORT_INFO_FILE = 'net-test-server-ports'
+
+
+class BaseTestRunner(object):
+  """Base class for running tests on a single device."""
+
+  def __init__(self, device, tool):
+    """
+      Args:
+        device: An instance of DeviceUtils that the tests will run on.
+        tool: Name of the Valgrind tool.
+    """
+    assert isinstance(device, device_utils.DeviceUtils)
+    self.device = device
+    self.device_serial = self.device.adb.GetDeviceSerial()
+    self.tool = CreateTool(tool, self.device)
+    self._http_server = None
+    self._forwarder_device_port = 8000
+    self.forwarder_base_url = ('http://localhost:%d' %
+        self._forwarder_device_port)
+    # We will allocate a port for the test server spawner when calling
+    # LaunchChromeTestServerSpawner, and a port for the test server when
+    # starting it in TestServerThread.
+    self.test_server_spawner_port = 0
+    self.test_server_port = 0
+
+  def _PushTestServerPortInfoToDevice(self):
+    """Pushes the latest port information to device."""
+    self.device.WriteFile(
+        self.device.GetExternalStoragePath() + '/' +
+            NET_TEST_SERVER_PORT_INFO_FILE,
+        '%d:%d' % (self.test_server_spawner_port, self.test_server_port))
+
+  def RunTest(self, test):
+    """Runs a test. Needs to be overridden.
+
+    Args:
+      test: A test to run.
+
+    Returns:
+      Tuple containing:
+        (base_test_result.TestRunResults, tests to rerun or None)
+    """
+    raise NotImplementedError
+
+  def InstallTestPackage(self):
+    """Installs the test package once before all tests are run."""
+    pass
+
+  def SetUp(self):
+    """Run once before all tests are run."""
+    self.InstallTestPackage()
+
+  def TearDown(self):
+    """Run once after all tests are run."""
+    self.ShutdownHelperToolsForTestSuite()
+
+  def LaunchTestHttpServer(self, document_root, port=None,
+                           extra_config_contents=None):
+    """Launches an HTTP server to serve HTTP tests.
+
+    Args:
+      document_root: Document root of the HTTP server.
+      port: Port to which the HTTP server should bind.
+      extra_config_contents: Extra config contents for the HTTP server.
+    """
+    import lighttpd_server
+    self._http_server = lighttpd_server.LighttpdServer(
+        document_root, port=port, extra_config_contents=extra_config_contents)
+    if self._http_server.StartupHttpServer():
+      logging.info('http server started: http://localhost:%s',
+                   self._http_server.port)
+    else:
+      logging.critical('Failed to start http server')
+    self._ForwardPortsForHttpServer()
+    return (self._forwarder_device_port, self._http_server.port)
+
+  def _ForwardPorts(self, port_pairs):
+    """Forwards a port."""
+    forwarder.Forwarder.Map(port_pairs, self.device, self.tool)
+
+  def _UnmapPorts(self, port_pairs):
+    """Unmap previously forwarded ports."""
+    for (device_port, _) in port_pairs:
+      forwarder.Forwarder.UnmapDevicePort(device_port, self.device)
+
+  # Deprecated: Use _ForwardPorts instead.
+  def StartForwarder(self, port_pairs):
+    """Starts TCP traffic forwarding for the given |port_pairs|.
+
+    Args:
+      port_pairs: A list of (device_port, local_port) tuples to forward.
+    """
+    self._ForwardPorts(port_pairs)
+
+  def _ForwardPortsForHttpServer(self):
+    """Starts a forwarder for the HTTP server.
+
+    The forwarder forwards HTTP requests and responses between host and device.
+    """
+    self._ForwardPorts([(self._forwarder_device_port, self._http_server.port)])
+
+  def _RestartHttpServerForwarderIfNecessary(self):
+    """Restarts the forwarder if it's not open."""
+    # Check whether the http server port is still being forwarded; if not,
+    # set the forwarding up again.
+    # TODO(dtrainor): This is not always reliable because sometimes the port
+    # will be left open even after the forwarder has been killed.
+    if not ports.IsDevicePortUsed(self.device, self._forwarder_device_port):
+      self._ForwardPortsForHttpServer()
+
+  def ShutdownHelperToolsForTestSuite(self):
+    """Shuts down the server and the forwarder."""
+    if self._http_server:
+      self._UnmapPorts([(self._forwarder_device_port, self._http_server.port)])
+      self._http_server.ShutdownHttpServer()
+
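+# Minimal subclass sketch (illustrative; MyTestRunner is hypothetical). Only
+# RunTest() must be overridden, per the base class above:
+#   class MyTestRunner(BaseTestRunner):
+#     def RunTest(self, test):
+#       results = base_test_result.TestRunResults()
+#       results.AddResult(base_test_result.BaseTestResult(
+#           test, base_test_result.ResultType.PASS))
+#       return (results, None)  # no tests to rerun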
diff --git a/build/android/pylib/base/environment.py b/build/android/pylib/base/environment.py
new file mode 100644
index 0000000..3f49f41
--- /dev/null
+++ b/build/android/pylib/base/environment.py
@@ -0,0 +1,34 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class Environment(object):
+  """An environment in which tests can be run.
+
+  This is expected to handle all logic that is applicable to an entire specific
+  environment but is independent of the test type.
+
+  Examples include:
+    - The local device environment, for running tests on devices attached to
+      the local machine.
+    - The local machine environment, for running tests directly on the local
+      machine.
+  """
+
+  def __init__(self):
+    pass
+
+  def SetUp(self):
+    raise NotImplementedError
+
+  def TearDown(self):
+    raise NotImplementedError
+
+  def __enter__(self):
+    self.SetUp()
+    return self
+
+  def __exit__(self, _exc_type, _exc_val, _exc_tb):
+    self.TearDown()
+
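+# Since SetUp()/TearDown() are wired to __enter__/__exit__, concrete
+# environments are used as context managers (illustrative sketch;
+# SomeEnvironment is a hypothetical subclass):
+#   with SomeEnvironment() as env:
+#     ...  # run tests within the environment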
diff --git a/build/android/pylib/base/environment_factory.py b/build/android/pylib/base/environment_factory.py
new file mode 100644
index 0000000..f4fe935
--- /dev/null
+++ b/build/android/pylib/base/environment_factory.py
@@ -0,0 +1,21 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from pylib import constants
+from pylib.local.device import local_device_environment
+try:
+  from pylib.remote.device import remote_device_environment
+except ImportError:
+  remote_device_environment = None
+
+def CreateEnvironment(args, error_func):
+
+  if args.environment == 'local':
+    if args.command not in constants.LOCAL_MACHINE_TESTS:
+      return local_device_environment.LocalDeviceEnvironment(args, error_func)
+    # TODO(jbudorick) Add local machine environment.
+  if args.environment == 'remote_device' and remote_device_environment:
+    return remote_device_environment.RemoteDeviceEnvironment(args,
+                                                             error_func)
+  error_func('Unable to create %s environment.' % args.environment)
diff --git a/build/android/pylib/base/test_collection.py b/build/android/pylib/base/test_collection.py
new file mode 100644
index 0000000..de51027
--- /dev/null
+++ b/build/android/pylib/base/test_collection.py
@@ -0,0 +1,80 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import threading
+
+class TestCollection(object):
+  """A threadsafe collection of tests.
+
+  Args:
+    tests: List of tests to put in the collection.
+  """
+
+  def __init__(self, tests=None):
+    if not tests:
+      tests = []
+    self._lock = threading.Lock()
+    self._tests = []
+    self._tests_in_progress = 0
+    # Used to signal that an item is available or all items have been handled.
+    self._item_available_or_all_done = threading.Event()
+    for t in tests:
+      self.add(t)
+
+  def _pop(self):
+    """Pop a test from the collection.
+
+    Waits until a test is available or all tests have been handled.
+
+    Returns:
+      A test or None if all tests have been handled.
+    """
+    while True:
+      # Wait for a test to be available or all tests to have been handled.
+      self._item_available_or_all_done.wait()
+      with self._lock:
+        # Check which of the two conditions triggered the signal.
+        if self._tests_in_progress == 0:
+          return None
+        try:
+          return self._tests.pop(0)
+        except IndexError:
+          # Another thread beat us to the available test, wait again.
+          self._item_available_or_all_done.clear()
+
+  def add(self, test):
+    """Add a test to the collection.
+
+    Args:
+      test: A test to add.
+    """
+    with self._lock:
+      self._tests.append(test)
+      self._item_available_or_all_done.set()
+      self._tests_in_progress += 1
+
+  def test_completed(self):
+    """Indicate that a test has been fully handled."""
+    with self._lock:
+      self._tests_in_progress -= 1
+      if self._tests_in_progress == 0:
+        # All tests have been handled, signal all waiting threads.
+        self._item_available_or_all_done.set()
+
+  def __iter__(self):
+    """Iterate through tests in the collection until all have been handled."""
+    while True:
+      r = self._pop()
+      if r is None:
+        break
+      yield r
+
+  def __len__(self):
+    """Return the number of tests currently in the collection."""
+    return len(self._tests)
+
+  def test_names(self):
+    """Return a list of the names of the tests currently in the collection."""
+    with self._lock:
+      return list(t.test for t in self._tests)
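+
+# Illustrative consumer sketch (Run is hypothetical): worker threads drain a
+# shared collection; iteration blocks until a test is available and ends once
+# every popped test has been marked complete via test_completed().
+#   collection = TestCollection(['a', 'b', 'c'])
+#   for test in collection:
+#     try:
+#       Run(test)
+#     finally:
+#       collection.test_completed()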
diff --git a/build/android/pylib/base/test_dispatcher.py b/build/android/pylib/base/test_dispatcher.py
new file mode 100644
index 0000000..c513d9a
--- /dev/null
+++ b/build/android/pylib/base/test_dispatcher.py
@@ -0,0 +1,339 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Dispatches tests, either sharding or replicating them.
+
+Performs the following steps:
+* Create a test collection factory, using the given tests
+  - If sharding: test collection factory returns the same shared test collection
+    to all test runners
+  - If replicating: test collection factory returns a unique test collection to
+    each test runner, with the same set of tests in each.
+* Create a test runner for each device.
+* Run each test runner in its own thread, grabbing tests from the test
+  collection until there are no tests left.
+"""
+
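+# Illustratively, the two factory shapes described above are:
+#   shared = test_collection.TestCollection(tests)
+#   sharding_factory = lambda: shared                      # one shared pool
+#   replicating_factory = lambda: test_collection.TestCollection(list(tests))
+#                                                          # fresh copy each
+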
+# TODO(jbudorick) Deprecate and remove this class after any relevant parts have
+# been ported to the new environment / test instance model.
+
+import logging
+import threading
+
+from devil.android import device_errors
+from devil.utils import reraiser_thread
+from devil.utils import watchdog_timer
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.base import test_collection
+
+
+DEFAULT_TIMEOUT = 7 * 60  # seven minutes
+
+
+class _ThreadSafeCounter(object):
+  """A threadsafe counter."""
+
+  def __init__(self):
+    self._lock = threading.Lock()
+    self._value = 0
+
+  def GetAndIncrement(self):
+    """Get the current value and increment it atomically.
+
+    Returns:
+      The value before incrementing.
+    """
+    with self._lock:
+      pre_increment = self._value
+      self._value += 1
+      return pre_increment
+
+
+class _Test(object):
+  """Holds a test with additional metadata."""
+
+  def __init__(self, test, tries=0):
+    """Initializes the _Test object.
+
+    Args:
+      test: The test.
+      tries: Number of tries so far.
+    """
+    self.test = test
+    self.tries = tries
+
+
+def _RunTestsFromQueue(runner, collection, out_results, watcher,
+                       num_retries, tag_results_with_device=False):
+  """Runs tests from the collection until empty using the given runner.
+
+  Adds TestRunResults objects to the out_results list and may add tests back
+  to the collection for retry.
+
+  Args:
+    runner: A TestRunner object used to run the tests.
+    collection: A TestCollection from which to get _Test objects to run.
+    out_results: A list to add TestRunResults to.
+    watcher: A watchdog_timer.WatchdogTimer object, used as a shared timeout.
+    num_retries: Number of retries for a test.
+    tag_results_with_device: If True, appends the name of the device on which
+        the test was run to the test name. Used when replicating to identify
+        which device ran each copy of the test, and to ensure each copy of the
+        test is recorded separately.
+  """
+
+  def TagTestRunResults(test_run_results):
+    """Tags all results with the last 4 digits of the device id.
+
+    Used when replicating tests to distinguish the same tests run on different
+    devices. We use a set to store test results, so the hash (generated from
+    name and tag) must be unique to be considered different results.
+    """
+    new_test_run_results = base_test_result.TestRunResults()
+    for test_result in test_run_results.GetAll():
+      test_result.SetName('%s_%s' % (runner.device_serial[-4:],
+                                     test_result.GetName()))
+      new_test_run_results.AddResult(test_result)
+    return new_test_run_results
+
+  for test in collection:
+    watcher.Reset()
+    try:
+      if not runner.device.IsOnline():
+        # Device is unresponsive, stop handling tests on this device.
+        msg = 'Device %s is unresponsive.' % runner.device_serial
+        logging.warning(msg)
+        raise device_errors.DeviceUnreachableError(msg)
+      result, retry = runner.RunTest(test.test)
+      if tag_results_with_device:
+        result = TagTestRunResults(result)
+      test.tries += 1
+      if retry and test.tries <= num_retries:
+        # Retry non-passing results, only record passing results.
+        pass_results = base_test_result.TestRunResults()
+        pass_results.AddResults(result.GetPass())
+        out_results.append(pass_results)
+        logging.warning('Will retry test %s, try #%s.', retry, test.tries)
+        collection.add(_Test(test=retry, tries=test.tries))
+      else:
+        # All tests passed or retry limit reached. Either way, record results.
+        out_results.append(result)
+    except:
+      # An unhandleable exception, ensure tests get run by another device and
+      # reraise this exception on the main thread.
+      collection.add(test)
+      raise
+    finally:
+      # Retries count as separate tasks so always mark the popped test as done.
+      collection.test_completed()
+
+
+def _SetUp(runner_factory, device, out_runners, threadsafe_counter):
+  """Creates a test runner for each device and calls SetUp() in parallel.
+
+  Note: if a device is unresponsive the corresponding TestRunner will not be
+    added to out_runners.
+
+  Args:
+    runner_factory: Callable that takes a device and index and returns a
+      TestRunner object.
+    device: The device serial number to set up.
+    out_runners: List to which each successfully set up TestRunner is added.
+    threadsafe_counter: A _ThreadSafeCounter object used to get shard indices.
+  """
+  try:
+    index = threadsafe_counter.GetAndIncrement()
+    logging.warning('Creating shard %s for device %s.', index, device)
+    runner = runner_factory(device, index)
+    runner.SetUp()
+    out_runners.append(runner)
+  except (device_errors.CommandFailedError,
+          device_errors.CommandTimeoutError,
+          device_errors.DeviceUnreachableError):
+    logging.exception('Failed to create shard for %s', str(device))
+
+
+def _RunAllTests(runners, test_collection_factory, num_retries, timeout=None,
+                 tag_results_with_device=False):
+  """Run all tests using the given TestRunners.
+
+  Args:
+    runners: A list of TestRunner objects.
+    test_collection_factory: A callable to generate a TestCollection object for
+        each test runner.
+    num_retries: Number of retries for a test.
+    timeout: Watchdog timeout in seconds.
+    tag_results_with_device: If True, appends the name of the device on which
+        the test was run to the test name. Used when replicating to identify
+        which device ran each copy of the test, and to ensure each copy of the
+        test is recorded separately.
+
+  Returns:
+    A tuple of (TestRunResults object, exit code)
+  """
+  logging.warning('Running tests with %s test %s.',
+                  len(runners), 'runners' if len(runners) != 1 else 'runner')
+  results = []
+  exit_code = 0
+  run_results = base_test_result.TestRunResults()
+  watcher = watchdog_timer.WatchdogTimer(timeout)
+  test_collections = [test_collection_factory() for _ in runners]
+
+  threads = [
+      reraiser_thread.ReraiserThread(
+          _RunTestsFromQueue,
+          [r, tc, results, watcher, num_retries, tag_results_with_device],
+          name=r.device_serial[-4:])
+      for r, tc in zip(runners, test_collections)]
+
+  workers = reraiser_thread.ReraiserThreadGroup(threads)
+  workers.StartAll()
+
+  try:
+    workers.JoinAll(watcher)
+  except device_errors.CommandFailedError:
+    logging.exception('Command failed on device.')
+  except device_errors.CommandTimeoutError:
+    logging.exception('Command timed out on device.')
+  except device_errors.DeviceUnreachableError:
+    logging.exception('Device became unreachable.')
+
+  if not all((len(tc) == 0 for tc in test_collections)):
+    logging.error('Only ran %d tests (all devices are likely offline).',
+                  len(results))
+    for tc in test_collections:
+      run_results.AddResults(base_test_result.BaseTestResult(
+          t, base_test_result.ResultType.UNKNOWN) for t in tc.test_names())
+
+  for r in results:
+    run_results.AddTestRunResults(r)
+  if not run_results.DidRunPass():
+    exit_code = constants.ERROR_EXIT_CODE
+  return (run_results, exit_code)
+
+
+def _CreateRunners(runner_factory, devices, timeout=None):
+  """Creates a test runner for each device and calls SetUp() in parallel.
+
+  Note: if a device is unresponsive the corresponding TestRunner will not be
+    included in the returned list.
+
+  Args:
+    runner_factory: Callable that takes a device and index and returns a
+      TestRunner object.
+    devices: List of device serial numbers as strings.
+    timeout: Watchdog timeout in seconds, defaults to the default timeout.
+
+  Returns:
+    A list of TestRunner objects.
+  """
+  logging.warning('Creating %s test %s.', len(devices),
+                  'runners' if len(devices) != 1 else 'runner')
+  runners = []
+  counter = _ThreadSafeCounter()
+  threads = reraiser_thread.ReraiserThreadGroup(
+      [reraiser_thread.ReraiserThread(_SetUp,
+                                      [runner_factory, d, runners, counter],
+                                      name=str(d)[-4:])
+       for d in devices])
+  threads.StartAll()
+  threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
+  return runners
+
+
+def _TearDownRunners(runners, timeout=None):
+  """Calls TearDown() for each test runner in parallel.
+
+  Args:
+    runners: A list of TestRunner objects.
+    timeout: Watchdog timeout in seconds, defaults to the default timeout.
+  """
+  threads = reraiser_thread.ReraiserThreadGroup(
+      [reraiser_thread.ReraiserThread(r.TearDown, name=r.device_serial[-4:])
+       for r in runners])
+  threads.StartAll()
+  threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
+
+
+def ApplyMaxPerRun(tests, max_per_run):
+  """Rearrange the tests so that no group contains more than max_per_run tests.
+
+  Args:
+    tests: List of tests to run; colon-delimited strings are treated as
+        groups of tests that may be split.
+    max_per_run: Maximum number of tests per group.
+
+  Returns:
+    A list of tests with no more than max_per_run per run.
+  """
+  tests_expanded = []
+  for test_group in tests:
+    if type(test_group) != str:
+      # Do not split test objects which are not strings.
+      tests_expanded.append(test_group)
+    else:
+      test_split = test_group.split(':')
+      for i in range(0, len(test_split), max_per_run):
+        tests_expanded.append(':'.join(test_split[i:i+max_per_run]))
+  return tests_expanded
+
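+# For example (mirrors the unit test in test_dispatcher_unittest.py):
+#   ApplyMaxPerRun(['A:B', 'C:D:E', 'F:G:H:I'], 2)
+#     -> ['A:B', 'C:D', 'E', 'F:G', 'H:I']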
+
+def RunTests(tests, runner_factory, devices, shard=True,
+             test_timeout=DEFAULT_TIMEOUT, setup_timeout=DEFAULT_TIMEOUT,
+             num_retries=2, max_per_run=256):
+  """Run all tests on attached devices, retrying tests that don't pass.
+
+  Args:
+    tests: List of tests to run.
+    runner_factory: Callable that takes a device and index and returns a
+        TestRunner object.
+    devices: List of attached devices.
+    shard: True if we should shard, False if we should replicate tests.
+      - Sharding tests will distribute tests across all test runners through a
+        shared test collection.
+      - Replicating tests will copy all tests to each test runner through a
+        unique test collection for each test runner.
+    test_timeout: Watchdog timeout in seconds for running tests.
+    setup_timeout: Watchdog timeout in seconds for creating and cleaning up
+        test runners.
+    num_retries: Number of retries for a test.
+    max_per_run: Maximum number of tests to run in any group.
+
+  Returns:
+    A tuple of (base_test_result.TestRunResults object, exit code).
+  """
+  if not tests:
+    logging.critical('No tests to run.')
+    return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE)
+
+  tests_expanded = ApplyMaxPerRun(tests, max_per_run)
+  if shard:
+    # Generate a shared TestCollection object for all test runners, so they
+    # draw from a common pool of tests.
+    shared_test_collection = test_collection.TestCollection(
+        [_Test(t) for t in tests_expanded])
+    test_collection_factory = lambda: shared_test_collection
+    tag_results_with_device = False
+    log_string = 'sharded across devices'
+  else:
+    # Generate a unique TestCollection object for each test runner, but use
+    # the same set of tests.
+    test_collection_factory = lambda: test_collection.TestCollection(
+        [_Test(t) for t in tests_expanded])
+    tag_results_with_device = True
+    log_string = 'replicated on each device'
+
+  logging.info('Will run %d tests (%s): %s',
+               len(tests_expanded), log_string, str(tests_expanded))
+  runners = _CreateRunners(runner_factory, devices, setup_timeout)
+  try:
+    return _RunAllTests(runners, test_collection_factory,
+                        num_retries, test_timeout, tag_results_with_device)
+  finally:
+    try:
+      _TearDownRunners(runners, setup_timeout)
+    except device_errors.DeviceUnreachableError as e:
+      logging.warning('Device unresponsive during TearDown: [%s]', e)
+    except Exception: # pylint: disable=broad-except
+      logging.exception('Unexpected exception caught during TearDown')
diff --git a/build/android/pylib/base/test_dispatcher_unittest.py b/build/android/pylib/base/test_dispatcher_unittest.py
new file mode 100755
index 0000000..186a072
--- /dev/null
+++ b/build/android/pylib/base/test_dispatcher_unittest.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unittests for test_dispatcher.py."""
+
+# pylint: disable=no-self-use
+# pylint: disable=protected-access
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.base import test_collection
+from pylib.base import test_dispatcher
+from pylib.constants import host_paths
+
+with host_paths.SysPath(host_paths.DEVIL_PATH):
+  from devil.android import device_utils
+  from devil.android.sdk import adb_wrapper
+  from devil.constants import exit_codes
+  from devil.utils import watchdog_timer
+
+with host_paths.SysPath(host_paths.PYMOCK_PATH):
+  import mock # pylint: disable=import-error
+
+
+class TestException(Exception):
+  pass
+
+
+def _MockDevice(serial):
+  d = mock.MagicMock(spec=device_utils.DeviceUtils)
+  d.__str__.return_value = serial
+  d.adb = mock.MagicMock(spec=adb_wrapper.AdbWrapper)
+  d.adb.GetDeviceSerial = mock.MagicMock(return_value=serial)
+  d.IsOnline = mock.MagicMock(return_value=True)
+  return d
+
+
+class MockRunner(object):
+  """A mock TestRunner."""
+  def __init__(self, device=None, shard_index=0):
+    self.device = device or _MockDevice('0')
+    self.device_serial = self.device.adb.GetDeviceSerial()
+    self.shard_index = shard_index
+    self.setups = 0
+    self.teardowns = 0
+
+  def RunTest(self, test):
+    results = base_test_result.TestRunResults()
+    results.AddResult(
+        base_test_result.BaseTestResult(test, base_test_result.ResultType.PASS))
+    return (results, None)
+
+  def SetUp(self):
+    self.setups += 1
+
+  def TearDown(self):
+    self.teardowns += 1
+
+
+class MockRunnerFail(MockRunner):
+  def RunTest(self, test):
+    results = base_test_result.TestRunResults()
+    results.AddResult(
+        base_test_result.BaseTestResult(test, base_test_result.ResultType.FAIL))
+    return (results, test)
+
+
+class MockRunnerFailTwice(MockRunner):
+  def __init__(self, device=None, shard_index=0):
+    super(MockRunnerFailTwice, self).__init__(device, shard_index)
+    self._fails = 0
+
+  def RunTest(self, test):
+    self._fails += 1
+    results = base_test_result.TestRunResults()
+    if self._fails <= 2:
+      results.AddResult(base_test_result.BaseTestResult(
+          test, base_test_result.ResultType.FAIL))
+      return (results, test)
+    else:
+      results.AddResult(base_test_result.BaseTestResult(
+          test, base_test_result.ResultType.PASS))
+      return (results, None)
+
+
+class MockRunnerException(MockRunner):
+  def RunTest(self, test):
+    raise TestException
+
+
+class TestFunctions(unittest.TestCase):
+  """Tests test_dispatcher._RunTestsFromQueue."""
+  @staticmethod
+  def _RunTests(mock_runner, tests):
+    results = []
+    tests = test_collection.TestCollection(
+        [test_dispatcher._Test(t) for t in tests])
+    test_dispatcher._RunTestsFromQueue(mock_runner, tests, results,
+                                       watchdog_timer.WatchdogTimer(None), 2)
+    run_results = base_test_result.TestRunResults()
+    for r in results:
+      run_results.AddTestRunResults(r)
+    return run_results
+
+  def testRunTestsFromQueue(self):
+    results = TestFunctions._RunTests(MockRunner(), ['a', 'b'])
+    self.assertEqual(len(results.GetPass()), 2)
+    self.assertEqual(len(results.GetNotPass()), 0)
+
+  def testRunTestsFromQueueRetry(self):
+    results = TestFunctions._RunTests(MockRunnerFail(), ['a', 'b'])
+    self.assertEqual(len(results.GetPass()), 0)
+    self.assertEqual(len(results.GetFail()), 2)
+
+  def testRunTestsFromQueueFailTwice(self):
+    results = TestFunctions._RunTests(MockRunnerFailTwice(), ['a', 'b'])
+    self.assertEqual(len(results.GetPass()), 2)
+    self.assertEqual(len(results.GetNotPass()), 0)
+
+  def testSetUp(self):
+    runners = []
+    counter = test_dispatcher._ThreadSafeCounter()
+    test_dispatcher._SetUp(MockRunner, _MockDevice('0'), runners, counter)
+    self.assertEqual(len(runners), 1)
+    self.assertEqual(runners[0].setups, 1)
+
+  def testThreadSafeCounter(self):
+    counter = test_dispatcher._ThreadSafeCounter()
+    for i in xrange(5):
+      self.assertEqual(counter.GetAndIncrement(), i)
+
+  def testApplyMaxPerRun(self):
+    self.assertEqual(
+        ['A:B', 'C:D', 'E', 'F:G', 'H:I'],
+        test_dispatcher.ApplyMaxPerRun(['A:B', 'C:D:E', 'F:G:H:I'], 2))
+
+
+class TestThreadGroupFunctions(unittest.TestCase):
+  """Tests test_dispatcher._RunAllTests and test_dispatcher._CreateRunners."""
+  def setUp(self):
+    self.tests = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
+    shared_test_collection = test_collection.TestCollection(
+        [test_dispatcher._Test(t) for t in self.tests])
+    self.test_collection_factory = lambda: shared_test_collection
+
+  def testCreate(self):
+    runners = test_dispatcher._CreateRunners(
+        MockRunner, [_MockDevice('0'), _MockDevice('1')])
+    for runner in runners:
+      self.assertEqual(runner.setups, 1)
+    self.assertEqual(set([r.device_serial for r in runners]),
+                     set(['0', '1']))
+    self.assertEqual(set([r.shard_index for r in runners]),
+                     set([0, 1]))
+
+  def testRun(self):
+    runners = [MockRunner(_MockDevice('0')), MockRunner(_MockDevice('1'))]
+    results, exit_code = test_dispatcher._RunAllTests(
+        runners, self.test_collection_factory, 0)
+    self.assertEqual(len(results.GetPass()), len(self.tests))
+    self.assertEqual(exit_code, 0)
+
+  def testTearDown(self):
+    runners = [MockRunner(_MockDevice('0')), MockRunner(_MockDevice('1'))]
+    test_dispatcher._TearDownRunners(runners)
+    for runner in runners:
+      self.assertEqual(runner.teardowns, 1)
+
+  def testRetry(self):
+    runners = test_dispatcher._CreateRunners(
+        MockRunnerFail, [_MockDevice('0'), _MockDevice('1')])
+    results, exit_code = test_dispatcher._RunAllTests(
+        runners, self.test_collection_factory, 0)
+    self.assertEqual(len(results.GetFail()), len(self.tests))
+    self.assertEqual(exit_code, exit_codes.ERROR)
+
+  def testReraise(self):
+    runners = test_dispatcher._CreateRunners(
+        MockRunnerException, [_MockDevice('0'), _MockDevice('1')])
+    with self.assertRaises(TestException):
+      test_dispatcher._RunAllTests(runners, self.test_collection_factory, 0)
+
+
+class TestShard(unittest.TestCase):
+  """Tests test_dispatcher.RunTests with sharding."""
+  @staticmethod
+  def _RunShard(runner_factory):
+    return test_dispatcher.RunTests(
+        ['a', 'b', 'c'], runner_factory, [_MockDevice('0'), _MockDevice('1')],
+        shard=True)
+
+  def testShard(self):
+    results, exit_code = TestShard._RunShard(MockRunner)
+    self.assertEqual(len(results.GetPass()), 3)
+    self.assertEqual(exit_code, 0)
+
+  def testFailing(self):
+    results, exit_code = TestShard._RunShard(MockRunnerFail)
+    self.assertEqual(len(results.GetPass()), 0)
+    self.assertEqual(len(results.GetFail()), 3)
+    self.assertEqual(exit_code, exit_codes.ERROR)
+
+  def testNoTests(self):
+    results, exit_code = test_dispatcher.RunTests(
+        [], MockRunner, [_MockDevice('0'), _MockDevice('1')], shard=True)
+    self.assertEqual(len(results.GetAll()), 0)
+    self.assertEqual(exit_code, exit_codes.ERROR)
+
+
+class TestReplicate(unittest.TestCase):
+  """Tests test_dispatcher.RunTests with replication."""
+  @staticmethod
+  def _RunReplicate(runner_factory):
+    return test_dispatcher.RunTests(
+        ['a', 'b', 'c'], runner_factory, [_MockDevice('0'), _MockDevice('1')],
+        shard=False)
+
+  def testReplicate(self):
+    results, exit_code = TestReplicate._RunReplicate(MockRunner)
+    # We expect 6 results since each test should have been run on every device
+    self.assertEqual(len(results.GetPass()), 6)
+    self.assertEqual(exit_code, 0)
+
+  def testFailing(self):
+    results, exit_code = TestReplicate._RunReplicate(MockRunnerFail)
+    self.assertEqual(len(results.GetPass()), 0)
+    self.assertEqual(len(results.GetFail()), 6)
+    self.assertEqual(exit_code, exit_codes.ERROR)
+
+  def testNoTests(self):
+    results, exit_code = test_dispatcher.RunTests(
+        [], MockRunner, [_MockDevice('0'), _MockDevice('1')], shard=False)
+    self.assertEqual(len(results.GetAll()), 0)
+    self.assertEqual(exit_code, exit_codes.ERROR)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/build/android/pylib/base/test_instance.py b/build/android/pylib/base/test_instance.py
new file mode 100644
index 0000000..cdf678f
--- /dev/null
+++ b/build/android/pylib/base/test_instance.py
@@ -0,0 +1,35 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class TestInstance(object):
+  """A type of test.
+
+  This is expected to handle all logic that is test-type specific but
+  independent of the environment or device.
+
+  Examples include:
+    - gtests
+    - instrumentation tests
+  """
+
+  def __init__(self):
+    pass
+
+  def TestType(self):
+    raise NotImplementedError
+
+  def SetUp(self):
+    raise NotImplementedError
+
+  def TearDown(self):
+    raise NotImplementedError
+
+  def __enter__(self):
+    self.SetUp()
+    return self
+
+  def __exit__(self, _exc_type, _exc_val, _exc_tb):
+    self.TearDown()
+
diff --git a/build/android/pylib/base/test_instance_factory.py b/build/android/pylib/base/test_instance_factory.py
new file mode 100644
index 0000000..523b4c5
--- /dev/null
+++ b/build/android/pylib/base/test_instance_factory.py
@@ -0,0 +1,22 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from pylib.gtest import gtest_test_instance
+from pylib.instrumentation import instrumentation_test_instance
+from pylib.uirobot import uirobot_test_instance
+from pylib.utils import isolator
+
+
+def CreateTestInstance(args, error_func):
+
+  if args.command == 'gtest':
+    return gtest_test_instance.GtestTestInstance(
+        args, isolator.Isolator(), error_func)
+  elif args.command == 'instrumentation':
+    return instrumentation_test_instance.InstrumentationTestInstance(
+        args, isolator.Isolator(), error_func)
+  elif args.command == 'uirobot':
+    return uirobot_test_instance.UirobotTestInstance(args, error_func)
+
+  error_func('Unable to create %s test instance.' % args.command)
diff --git a/build/android/pylib/base/test_run.py b/build/android/pylib/base/test_run.py
new file mode 100644
index 0000000..7380e78
--- /dev/null
+++ b/build/android/pylib/base/test_run.py
@@ -0,0 +1,39 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class TestRun(object):
+  """An execution of a particular test on a particular device.
+
+  This is expected to handle all logic that is specific to the combination of
+  environment and test type.
+
+  Examples include:
+    - local gtests
+    - local instrumentation tests
+  """
+
+  def __init__(self, env, test_instance):
+    self._env = env
+    self._test_instance = test_instance
+
+  def TestPackage(self):
+    raise NotImplementedError
+
+  def SetUp(self):
+    raise NotImplementedError
+
+  def RunTests(self):
+    raise NotImplementedError
+
+  def TearDown(self):
+    raise NotImplementedError
+
+  def __enter__(self):
+    self.SetUp()
+    return self
+
+  def __exit__(self, exc_type, exc_val, exc_tb):
+    self.TearDown()
+
diff --git a/build/android/pylib/base/test_run_factory.py b/build/android/pylib/base/test_run_factory.py
new file mode 100644
index 0000000..8db9bd6
--- /dev/null
+++ b/build/android/pylib/base/test_run_factory.py
@@ -0,0 +1,54 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from pylib.gtest import gtest_test_instance
+from pylib.instrumentation import instrumentation_test_instance
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_gtest_run
+from pylib.local.device import local_device_instrumentation_test_run
+from pylib.uirobot import uirobot_test_instance
+
+try:
+  from pylib.remote.device import remote_device_environment
+  from pylib.remote.device import remote_device_gtest_run
+  from pylib.remote.device import remote_device_instrumentation_test_run
+  from pylib.remote.device import remote_device_uirobot_test_run
+except ImportError:
+  remote_device_environment = None
+  remote_device_gtest_run = None
+  remote_device_instrumentation_test_run = None
+  remote_device_uirobot_test_run = None
+
+
+def CreateTestRun(_args, env, test_instance, error_func):
+  if isinstance(env, local_device_environment.LocalDeviceEnvironment):
+    if isinstance(test_instance, gtest_test_instance.GtestTestInstance):
+      return local_device_gtest_run.LocalDeviceGtestRun(env, test_instance)
+    if isinstance(test_instance,
+                  instrumentation_test_instance.InstrumentationTestInstance):
+      return (local_device_instrumentation_test_run
+              .LocalDeviceInstrumentationTestRun(env, test_instance))
+
+  if (remote_device_environment
+      and isinstance(env, remote_device_environment.RemoteDeviceEnvironment)):
+    # The remote_device modules should be all or nothing.
+    assert (remote_device_gtest_run
+            and remote_device_instrumentation_test_run
+            and remote_device_uirobot_test_run)
+
+    if isinstance(test_instance, gtest_test_instance.GtestTestInstance):
+      return remote_device_gtest_run.RemoteDeviceGtestTestRun(
+          env, test_instance)
+    if isinstance(test_instance,
+                  instrumentation_test_instance.InstrumentationTestInstance):
+      return (remote_device_instrumentation_test_run
+              .RemoteDeviceInstrumentationTestRun(env, test_instance))
+    if isinstance(test_instance, uirobot_test_instance.UirobotTestInstance):
+      return remote_device_uirobot_test_run.RemoteDeviceUirobotTestRun(
+          env, test_instance)
+
+
+  error_func('Unable to create test run for %s tests in %s environment'
+             % (str(test_instance), str(env)))
+
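+# Illustrative driver wiring (hypothetical sketch, mirroring the factories in
+# pylib/base; parser.error stands in for an assumed error_func):
+#   env = environment_factory.CreateEnvironment(args, parser.error)
+#   test = test_instance_factory.CreateTestInstance(args, parser.error)
+#   with env, test:
+#     with CreateTestRun(args, env, test, parser.error) as test_run:
+#       results = test_run.RunTests()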
diff --git a/build/android/pylib/base/test_server.py b/build/android/pylib/base/test_server.py
new file mode 100644
index 0000000..085a51e
--- /dev/null
+++ b/build/android/pylib/base/test_server.py
@@ -0,0 +1,19 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class TestServer(object):
+  """Base class for any server that needs to be set up for the tests."""
+
+  def __init__(self, *args, **kwargs):
+    pass
+
+  def SetUp(self):
+    raise NotImplementedError
+
+  def Reset(self):
+    raise NotImplementedError
+
+  def TearDown(self):
+    raise NotImplementedError
+
diff --git a/build/android/pylib/chrome_test_server_spawner.py b/build/android/pylib/chrome_test_server_spawner.py
new file mode 100644
index 0000000..ec2896f
--- /dev/null
+++ b/build/android/pylib/chrome_test_server_spawner.py
@@ -0,0 +1,430 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
+
+It's used to accept requests from the device to spawn and kill instances of the
+chrome test server on the host.
+"""
+# pylint: disable=W0702
+
+import BaseHTTPServer
+import json
+import logging
+import os
+import select
+import struct
+import subprocess
+import sys
+import threading
+import time
+import urlparse
+
+from devil.android import forwarder
+from devil.android import ports
+
+from pylib import constants
+from pylib.constants import host_paths
+
+
+# Paths that are needed to import necessary modules when launching a testserver.
+os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
+    % (os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'),
+       os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party', 'tlslite'),
+       os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party', 'pyftpdlib',
+                    'src'),
+       os.path.join(host_paths.DIR_SOURCE_ROOT, 'net', 'tools', 'testserver'),
+       os.path.join(host_paths.DIR_SOURCE_ROOT, 'sync', 'tools', 'testserver')))
+
+
+SERVER_TYPES = {
+    'http': '',
+    'ftp': '-f',
+    'sync': '',  # Sync uses its own script, and doesn't take a server type arg.
+    'tcpecho': '--tcp-echo',
+    'udpecho': '--udp-echo',
+}
+
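+# A spawn request arrives as an HTTP POST whose JSON body supplies the keys
+# consumed below, e.g. (illustrative): '{"server-type": "http", "port": 0}'.
+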
+
+# The timeout (in seconds) of starting up the Python test server.
+TEST_SERVER_STARTUP_TIMEOUT = 10
+
+def _WaitUntil(predicate, max_attempts=5):
+  """Blocks until the provided predicate (function) is true.
+
+  Returns:
+    Whether the provided predicate was satisfied once (within max_attempts).
+  """
+  sleep_time_sec = 0.025
+  for _ in xrange(1, max_attempts):
+    if predicate():
+      return True
+    time.sleep(sleep_time_sec)
+    sleep_time_sec = min(1, sleep_time_sec * 2)  # Don't wait more than 1 sec.
+  return False
+
+
+def _CheckPortAvailable(port):
+  """Returns True if |port| is available."""
+  return _WaitUntil(lambda: ports.IsHostPortAvailable(port))
+
+
+def _CheckPortNotAvailable(port):
+  """Returns True if |port| is not available."""
+  return _WaitUntil(lambda: not ports.IsHostPortAvailable(port))
+
+
+def _CheckDevicePortStatus(device, port):
+  """Returns whether the provided port is used."""
+  return _WaitUntil(lambda: ports.IsDevicePortUsed(device, port))
+
+
+def _GetServerTypeCommandLine(server_type):
+  """Returns the command-line by the given server type.
+
+  Args:
+    server_type: the server type to be used (e.g. 'http').
+
+  Returns:
+    A string containing the command-line argument.
+  """
+  if server_type not in SERVER_TYPES:
+    raise NotImplementedError('Unknown server type: %s' % server_type)
+  if server_type == 'udpecho':
+    raise Exception('Please do not run UDP echo tests because we do not have '
+                    'a UDP forwarder tool.')
+  return SERVER_TYPES[server_type]
+
+
+class TestServerThread(threading.Thread):
+  """A thread to run the test server in a separate process."""
+
+  def __init__(self, ready_event, arguments, device, tool):
+    """Initialize TestServerThread with the following argument.
+
+    Args:
+      ready_event: event which will be set when the test server is ready.
+      arguments: dictionary of arguments to run the test server.
+      device: An instance of DeviceUtils.
+      tool: instance of runtime error detection tool.
+    """
+    threading.Thread.__init__(self)
+    self.wait_event = threading.Event()
+    self.stop_flag = False
+    self.ready_event = ready_event
+    self.ready_event.clear()
+    self.arguments = arguments
+    self.device = device
+    self.tool = tool
+    self.test_server_process = None
+    self.is_ready = False
+    self.host_port = self.arguments['port']
+    assert isinstance(self.host_port, int)
+    # The forwarder device port now is dynamically allocated.
+    self.forwarder_device_port = 0
+    # Anonymous pipe in order to get port info from test server.
+    self.pipe_in = None
+    self.pipe_out = None
+    self.process = None
+    self.command_line = []
+
+  def _WaitToStartAndGetPortFromTestServer(self):
+    """Waits for the Python test server to start and gets the port it is using.
+
+    The port information is passed by the Python test server with a pipe given
+    by self.pipe_out. It is written as a result to |self.host_port|.
+
+    Returns:
+      Whether the port used by the test server was successfully fetched.
+    """
+    assert self.host_port == 0 and self.pipe_out and self.pipe_in
+    (in_fds, _, _) = select.select([self.pipe_in, ], [], [],
+                                   TEST_SERVER_STARTUP_TIMEOUT)
+    if len(in_fds) == 0:
+      logging.error('Timed out waiting for the Python test server to start.')
+      return False
+    # First read the data length as an unsigned 4-byte value.  This
+    # is _not_ using network byte ordering since the Python test server packs
+    # size as native byte order and all Chromium platforms so far are
+    # configured to use little-endian.
+    # TODO(jnd): Change the Python test server and local_test_server_*.cc to
+    # use a unified byte order (either big-endian or little-endian).
+    data_length = os.read(self.pipe_in, struct.calcsize('=L'))
+    if data_length:
+      (data_length,) = struct.unpack('=L', data_length)
+      assert data_length
+    if not data_length:
+      logging.error('Failed to get length of server data.')
+      return False
+    port_json = os.read(self.pipe_in, data_length)
+    if not port_json:
+      logging.error('Failed to get server data.')
+      return False
+    logging.info('Got port json data: %s', port_json)
+    port_json = json.loads(port_json)
+    if port_json.has_key('port') and isinstance(port_json['port'], int):
+      self.host_port = port_json['port']
+      return _CheckPortNotAvailable(self.host_port)
+    logging.error('Failed to get port information from the server data.')
+    return False
+
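+  # In effect, the test server's side of the handshake above is (illustrative,
+  # matching the '=L'-prefixed format the reader expects):
+  #   payload = json.dumps({'port': port})
+  #   os.write(startup_pipe_fd, struct.pack('=L', len(payload)) + payload)
+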
+  def _GenerateCommandLineArguments(self):
+    """Generates the command line to run the test server.
+
+    Note that all options are processed by following the definitions in
+    testserver.py.
+    """
+    if self.command_line:
+      return
+
+    args_copy = dict(self.arguments)
+
+    # Translate the server type.
+    type_cmd = _GetServerTypeCommandLine(args_copy.pop('server-type'))
+    if type_cmd:
+      self.command_line.append(type_cmd)
+
+    # Use a pipe to get the port given by the instance of Python test server
+    # if the test does not specify the port.
+    assert self.host_port == args_copy['port']
+    if self.host_port == 0:
+      (self.pipe_in, self.pipe_out) = os.pipe()
+      self.command_line.append('--startup-pipe=%d' % self.pipe_out)
+
+    # Pass the remaining arguments as-is.
+    for key, values in args_copy.iteritems():
+      if not isinstance(values, list):
+        values = [values]
+      for value in values:
+        if value is None:
+          self.command_line.append('--%s' % key)
+        else:
+          self.command_line.append('--%s=%s' % (key, value))
+
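+  # For instance (illustrative), arguments of {'server-type': 'ftp', 'port': 0}
+  # roughly yield: ['-f', '--startup-pipe=<fd>', '--port=0'].
+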
+  def _CloseUnnecessaryFDsForTestServerProcess(self):
+    # This is required to avoid subtle deadlocks that could be caused by the
+    # test server child process inheriting undesirable file descriptors such as
+    # file lock file descriptors.
+    for fd in xrange(0, 1024):
+      if fd != self.pipe_out:
+        try:
+          os.close(fd)
+        except:
+          pass
+
+  def run(self):
+    logging.info('Start running the thread!')
+    self.wait_event.clear()
+    self._GenerateCommandLineArguments()
+    command = host_paths.DIR_SOURCE_ROOT
+    if self.arguments['server-type'] == 'sync':
+      command = [os.path.join(command, 'sync', 'tools', 'testserver',
+                              'sync_testserver.py')] + self.command_line
+    else:
+      command = [os.path.join(command, 'net', 'tools', 'testserver',
+                              'testserver.py')] + self.command_line
+    logging.info('Running: %s', command)
+
+    # Disable PYTHONUNBUFFERED because it has a bad interaction with the
+    # testserver. Remove once this interaction is fixed.
+    unbuf = os.environ.pop('PYTHONUNBUFFERED', None)
+
+    # Pass DIR_SOURCE_ROOT as the child's working directory so that relative
+    # paths in the arguments are resolved correctly.
+    self.process = subprocess.Popen(
+        command, preexec_fn=self._CloseUnnecessaryFDsForTestServerProcess,
+        cwd=host_paths.DIR_SOURCE_ROOT)
+    if unbuf:
+      os.environ['PYTHONUNBUFFERED'] = unbuf
+    if self.process:
+      if self.pipe_out:
+        self.is_ready = self._WaitToStartAndGetPortFromTestServer()
+      else:
+        self.is_ready = _CheckPortNotAvailable(self.host_port)
+    if self.is_ready:
+      forwarder.Forwarder.Map([(0, self.host_port)], self.device, self.tool)
+      # Check whether the forwarder is ready on the device.
+      self.is_ready = False
+      device_port = forwarder.Forwarder.DevicePortForHostPort(self.host_port)
+      if device_port and _CheckDevicePortStatus(self.device, device_port):
+        self.is_ready = True
+        self.forwarder_device_port = device_port
+    # Wake up the request handler thread.
+    self.ready_event.set()
+    # Keep thread running until Stop() gets called.
+    _WaitUntil(lambda: self.stop_flag, max_attempts=sys.maxint)
+    if self.process.poll() is None:
+      self.process.kill()
+    forwarder.Forwarder.UnmapDevicePort(self.forwarder_device_port, self.device)
+    self.process = None
+    self.is_ready = False
+    if self.pipe_out:
+      os.close(self.pipe_in)
+      os.close(self.pipe_out)
+      self.pipe_in = None
+      self.pipe_out = None
+    logging.info('Test server has stopped.')
+    self.wait_event.set()
+
+  def Stop(self):
+    """Blocks until the loop has finished.
+
+    Note that this must be called in another thread.
+    """
+    if not self.process:
+      return
+    self.stop_flag = True
+    self.wait_event.wait()
+
+
+class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+  """A handler used to process http GET/POST request."""
+
+  def _SendResponse(self, response_code, response_reason, additional_headers,
+                    contents):
+    """Generates a response sent to the client from the provided parameters.
+
+    Args:
+      response_code: number of the response status.
+      response_reason: string of reason description of the response.
+      additional_headers: dict of additional headers. Each key is the name of
+                          the header, each value is the content of the header.
+      contents: string of the contents we want to send to client.
+    """
+    self.send_response(response_code, response_reason)
+    self.send_header('Content-Type', 'text/html')
+    # Specify Content-Length, since without it the HTTP(S) response is never
+    # completed properly (and the browser keeps expecting data).
+    self.send_header('Content-Length', len(contents))
+    for header_name in additional_headers:
+      self.send_header(header_name, additional_headers[header_name])
+    self.end_headers()
+    self.wfile.write(contents)
+    self.wfile.flush()
+
+  def _StartTestServer(self):
+    """Starts the test server thread."""
+    logging.info('Handling request to spawn a test server.')
+    content_type = self.headers.getheader('content-type')
+    if content_type != 'application/json':
+      raise Exception('Bad content-type for start request.')
+    content_length = self.headers.getheader('content-length')
+    if not content_length:
+      content_length = 0
+    try:
+      content_length = int(content_length)
+    except ValueError:
+      raise Exception('Bad content-length for start request.')
+    logging.info('Content length: %s', content_length)
+    test_server_argument_json = self.rfile.read(content_length)
+    logging.info('Test server arguments: %s', test_server_argument_json)
+    assert not self.server.test_server_instance
+    ready_event = threading.Event()
+    self.server.test_server_instance = TestServerThread(
+        ready_event,
+        json.loads(test_server_argument_json),
+        self.server.device,
+        self.server.tool)
+    self.server.test_server_instance.setDaemon(True)
+    self.server.test_server_instance.start()
+    ready_event.wait()
+    if self.server.test_server_instance.is_ready:
+      self._SendResponse(200, 'OK', {}, json.dumps(
+          {'port': self.server.test_server_instance.forwarder_device_port,
+           'message': 'started'}))
+      logging.info('Test server is running on port: %d.',
+                   self.server.test_server_instance.host_port)
+    else:
+      self.server.test_server_instance.Stop()
+      self.server.test_server_instance = None
+      self._SendResponse(500, 'Test Server Error.', {}, '')
+      logging.info('Encountered a problem while starting the test server.')
+
+  def _KillTestServer(self):
+    """Stops the test server instance."""
+    # There should only ever be one test server at a time. This may do the
+    # wrong thing if we try to start multiple test servers.
+    if not self.server.test_server_instance:
+      return
+    port = self.server.test_server_instance.host_port
+    logging.info('Handling request to kill a test server on port: %d.', port)
+    self.server.test_server_instance.Stop()
+    # Make sure the test server is actually gone before sending the response.
+    if _CheckPortAvailable(port):
+      self._SendResponse(200, 'OK', {}, 'killed')
+      logging.info('Test server on port %d was killed.', port)
+    else:
+      self._SendResponse(500, 'Test Server Error.', {}, '')
+      logging.info('Encountered a problem while killing the test server.')
+    self.server.test_server_instance = None
+
+  def do_POST(self):
+    parsed_path = urlparse.urlparse(self.path)
+    action = parsed_path.path
+    logging.info('Action for POST method is: %s.', action)
+    if action == '/start':
+      self._StartTestServer()
+    else:
+      self._SendResponse(400, 'Unknown request.', {}, '')
+      logging.info('Encountered unknown request: %s.', action)
+
+  def do_GET(self):
+    parsed_path = urlparse.urlparse(self.path)
+    action = parsed_path.path
+    params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
+    logging.info('Action for GET method is: %s.', action)
+    for param in params:
+      logging.info('%s=%s', param, params[param][0])
+    if action == '/kill':
+      self._KillTestServer()
+    elif action == '/ping':
+      # The ping handler is used to check whether the spawner server is ready
+      # to serve requests. We don't need to test the status of the test
+      # server when handling a ping request.
+      self._SendResponse(200, 'OK', {}, 'ready')
+      logging.info('Handled ping request and sent response.')
+    else:
+      self._SendResponse(400, 'Unknown request.', {}, '')
+      logging.info('Encountered unknown request: %s.', action)
+
+
+class SpawningServer(object):
+  """The class used to start/stop a http server."""
+
+  def __init__(self, test_server_spawner_port, device, tool):
+    logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
+    self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
+                                            SpawningServerRequestHandler)
+    self.server.device = device
+    self.server.tool = tool
+    self.server.test_server_instance = None
+    self.server.build_type = constants.GetBuildType()
+
+  def _Listen(self):
+    logging.info('Starting test server spawner')
+    self.server.serve_forever()
+
+  def Start(self):
+    """Starts the test server spawner."""
+    listener_thread = threading.Thread(target=self._Listen)
+    listener_thread.setDaemon(True)
+    listener_thread.start()
+
+  def Stop(self):
+    """Stops the test server spawner.
+
+    Also cleans the server state.
+    """
+    self.CleanupState()
+    self.server.shutdown()
+
+  def CleanupState(self):
+    """Cleans up the spawning server state.
+
+    This should be called if the test server spawner is reused,
+    to avoid sharing the test server instance.
+    """
+    if self.server.test_server_instance:
+      self.server.test_server_instance.Stop()
+      self.server.test_server_instance = None
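For reference, the spawner above speaks a small HTTP protocol: POST /start with a JSON body (which must include 'server-type' and 'port', where 0 means let the test server pick) returns the device-visible port, GET /ping reports readiness, and GET /kill tears the test server down. A minimal client sketch, assuming a host-local spawner on a hypothetical port (helper names are illustrative, not part of this change):

```python
import httplib
import json

SPAWNER_ADDRESS = ('127.0.0.1', 8001)  # assumption: host-local spawner port

def _request(method, path, body=None, headers=None):
  conn = httplib.HTTPConnection(*SPAWNER_ADDRESS)
  conn.request(method, path, body, headers or {})
  return conn.getresponse()

def start_test_server(arguments):
  """POSTs /start; returns the device-visible port of the test server."""
  response = _request('POST', '/start', json.dumps(arguments),
                      {'Content-Type': 'application/json'})
  if response.status != 200:
    raise Exception('Spawner could not start the test server.')
  return json.loads(response.read())['port']

# Usage: port 0 asks the spawner to let the test server pick its own port.
assert _request('GET', '/ping').read() == 'ready'
device_port = start_test_server({'server-type': 'http', 'port': 0})
_request('GET', '/kill')
```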
diff --git a/build/android/pylib/constants/__init__.py b/build/android/pylib/constants/__init__.py
new file mode 100644
index 0000000..2e84cea
--- /dev/null
+++ b/build/android/pylib/constants/__init__.py
@@ -0,0 +1,271 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Defines a set of constants shared by test runners and other scripts."""
+
+# TODO(jbudorick): Split these constants into coherent modules.
+
+# pylint: disable=W0212
+
+import collections
+import glob
+import logging
+import os
+import subprocess
+
+import devil.android.sdk.keyevent
+from devil.android.sdk import version_codes
+from devil.constants import exit_codes
+
+
+keyevent = devil.android.sdk.keyevent
+
+
+DIR_SOURCE_ROOT = os.environ.get('CHECKOUT_SOURCE_ROOT',
+    os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                 os.pardir, os.pardir, os.pardir, os.pardir)))
+
+PackageInfo = collections.namedtuple('PackageInfo',
+    ['package', 'activity', 'cmdline_file', 'devtools_socket'])
+
+PACKAGE_INFO = {
+    'chrome_document': PackageInfo(
+        'com.google.android.apps.chrome.document',
+        'com.google.android.apps.chrome.document.ChromeLauncherActivity',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote'),
+    'chrome': PackageInfo(
+        'com.google.android.apps.chrome',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote'),
+    'chrome_beta': PackageInfo(
+        'com.chrome.beta',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote'),
+    'chrome_stable': PackageInfo(
+        'com.android.chrome',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote'),
+    'chrome_dev': PackageInfo(
+        'com.chrome.dev',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote'),
+    'chrome_canary': PackageInfo(
+        'com.chrome.canary',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote'),
+    'chrome_work': PackageInfo(
+        'com.chrome.work',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote'),
+    'chromium': PackageInfo(
+        'org.chromium.chrome',
+        'com.google.android.apps.chrome.Main',
+        '/data/local/chrome-command-line',
+        'chrome_devtools_remote'),
+    'legacy_browser': PackageInfo(
+        'com.google.android.browser',
+        'com.android.browser.BrowserActivity',
+        None,
+        None),
+    'chromecast_shell': PackageInfo(
+        'com.google.android.apps.mediashell',
+        'com.google.android.apps.mediashell.MediaShellActivity',
+        '/data/local/tmp/castshell-command-line',
+        None),
+    'content_shell': PackageInfo(
+        'org.chromium.content_shell_apk',
+        'org.chromium.content_shell_apk.ContentShellActivity',
+        '/data/local/tmp/content-shell-command-line',
+        None),
+    'android_webview_shell': PackageInfo(
+        'org.chromium.android_webview.shell',
+        'org.chromium.android_webview.shell.AwShellActivity',
+        '/data/local/tmp/android-webview-command-line',
+        None),
+    'gtest': PackageInfo(
+        'org.chromium.native_test',
+        'org.chromium.native_test.NativeUnitTestActivity',
+        '/data/local/tmp/chrome-native-tests-command-line',
+        None),
+    'components_browsertests': PackageInfo(
+        'org.chromium.components_browsertests_apk',
+        ('org.chromium.components_browsertests_apk' +
+         '.ComponentsBrowserTestsActivity'),
+        '/data/local/tmp/chrome-native-tests-command-line',
+        None),
+    'content_browsertests': PackageInfo(
+        'org.chromium.content_browsertests_apk',
+        'org.chromium.content_browsertests_apk.ContentBrowserTestsActivity',
+        '/data/local/tmp/chrome-native-tests-command-line',
+        None),
+    'chromedriver_webview_shell': PackageInfo(
+        'org.chromium.chromedriver_webview_shell',
+        'org.chromium.chromedriver_webview_shell.Main',
+        None,
+        None),
+}
+
+
+# Ports arrangement for various test servers used in Chrome for Android.
+# The lighttpd server will attempt to use 9000 as its default port; if that is
+# unavailable, it will pick a free port in the range 8001-8999.
+LIGHTTPD_DEFAULT_PORT = 9000
+LIGHTTPD_RANDOM_PORT_FIRST = 8001
+LIGHTTPD_RANDOM_PORT_LAST = 8999
+TEST_SYNC_SERVER_PORT = 9031
+TEST_SEARCH_BY_IMAGE_SERVER_PORT = 9041
+TEST_POLICY_SERVER_PORT = 9051
+
+
+TEST_EXECUTABLE_DIR = '/data/local/tmp'
+# Directories for common java libraries for SDK build.
+# These constants are defined in build/android/ant/common.xml
+SDK_BUILD_JAVALIB_DIR = 'lib.java'
+SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
+SDK_BUILD_APKS_DIR = 'apks'
+
+ADB_KEYS_FILE = '/data/misc/adb/adb_keys'
+
+PERF_OUTPUT_DIR = os.path.join(DIR_SOURCE_ROOT, 'out', 'step_results')
+# The directory on the device where perf test output gets saved to.
+DEVICE_PERF_OUTPUT_DIR = (
+    '/data/data/' + PACKAGE_INFO['chrome'].package + '/files')
+
+SCREENSHOTS_DIR = os.path.join(DIR_SOURCE_ROOT, 'out_screenshots')
+
+ANDROID_SDK_VERSION = version_codes.MARSHMALLOW
+ANDROID_SDK_BUILD_TOOLS_VERSION = '23.0.1'
+ANDROID_SDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
+                                'third_party', 'android_tools', 'sdk')
+ANDROID_SDK_TOOLS = os.path.join(ANDROID_SDK_ROOT,
+                                 'build-tools', ANDROID_SDK_BUILD_TOOLS_VERSION)
+ANDROID_NDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
+                                'third_party', 'android_tools', 'ndk')
+
+PROGUARD_SCRIPT_PATH = os.path.join(
+    ANDROID_SDK_ROOT, 'tools', 'proguard', 'bin', 'proguard.sh')
+
+PROGUARD_ROOT = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'proguard')
+
+BAD_DEVICES_JSON = os.path.join(DIR_SOURCE_ROOT,
+                                os.environ.get('CHROMIUM_OUT_DIR', 'out'),
+                                'bad_devices.json')
+
+UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
+
+# TODO(jbudorick): Remove once unused.
+DEVICE_LOCAL_PROPERTIES_PATH = '/data/local.prop'
+
+# TODO(jbudorick): Rework this into testing/buildbot/
+PYTHON_UNIT_TEST_SUITES = {
+  'pylib_py_unittests': {
+    'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android'),
+    'test_modules': [
+      'devil.android.device_utils_test',
+      'devil.android.md5sum_test',
+      'devil.utils.cmd_helper_test',
+      'pylib.results.json_results_test',
+      'pylib.utils.proguard_test',
+    ]
+  },
+  'gyp_py_unittests': {
+    'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android', 'gyp'),
+    'test_modules': [
+      'java_cpp_enum_tests',
+      'java_google_api_keys_tests',
+    ]
+  },
+}
+
+LOCAL_MACHINE_TESTS = ['junit', 'python']
+VALID_ENVIRONMENTS = ['local', 'remote_device']
+VALID_TEST_TYPES = ['gtest', 'instrumentation', 'junit', 'linker', 'monkey',
+                    'perf', 'python', 'uirobot']
+VALID_DEVICE_TYPES = ['Android', 'iOS']
+
+
+def GetBuildType():
+  try:
+    return os.environ['BUILDTYPE']
+  except KeyError:
+    raise EnvironmentError(
+        'The BUILDTYPE environment variable has not been set')
+
+
+def SetBuildType(build_type):
+  os.environ['BUILDTYPE'] = build_type
+
+
+def SetBuildDirectory(build_directory):
+  os.environ['CHROMIUM_OUT_DIR'] = build_directory
+
+
+def SetOutputDirectory(output_directory):
+  os.environ['CHROMIUM_OUTPUT_DIR'] = output_directory
+
+
+def GetOutDirectory(build_type=None):
+  """Returns the out directory where the output binaries are built.
+
+  Args:
+    build_type: Build type, generally 'Debug' or 'Release'. Defaults to the
+      globally set build type environment variable BUILDTYPE.
+  """
+  if 'CHROMIUM_OUTPUT_DIR' in os.environ:
+    return os.path.abspath(os.path.join(
+        DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUTPUT_DIR')))
+
+  return os.path.abspath(os.path.join(
+      DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUT_DIR', 'out'),
+      GetBuildType() if build_type is None else build_type))
+
+
+def CheckOutputDirectory():
+  """Checks that CHROMIUM_OUT_DIR or CHROMIUM_OUTPUT_DIR is set.
+
+  If neither are set, but the current working directory is a build directory,
+  then CHROMIUM_OUTPUT_DIR is set to the current working directory.
+
+  Raises:
+    Exception: If no output directory is detected.
+  """
+  output_dir = os.environ.get('CHROMIUM_OUTPUT_DIR')
+  out_dir = os.environ.get('CHROMIUM_OUT_DIR')
+  if not output_dir and not out_dir:
+    # If CWD is an output directory, then assume it's the desired one.
+    if os.path.exists('build.ninja'):
+      output_dir = os.getcwd()
+      SetOutputDirectory(output_dir)
+    elif os.environ.get('CHROME_HEADLESS'):
+      # When running on bots, see if the output directory is obvious.
+      dirs = glob.glob(os.path.join(DIR_SOURCE_ROOT, 'out', '*', 'build.ninja'))
+      if len(dirs) == 1:
+        SetOutputDirectory(dirs[0])
+      else:
+        raise Exception('Neither CHROMIUM_OUTPUT_DIR nor CHROMIUM_OUT_DIR '
+                        'has been set. CHROME_HEADLESS detected, but multiple '
+                        'out dirs exist: %r' % dirs)
+    else:
+      raise Exception('Neither CHROMIUM_OUTPUT_DIR nor CHROMIUM_OUT_DIR '
+                      'has been set')
+
+
+# TODO(jbudorick): Convert existing callers to AdbWrapper.GetAdbPath() and
+# remove this.
+def GetAdbPath():
+  from devil.android.sdk import adb_wrapper
+  return adb_wrapper.AdbWrapper.GetAdbPath()
+
+
+# Exit codes
+ERROR_EXIT_CODE = exit_codes.ERROR
+INFRA_EXIT_CODE = exit_codes.INFRA
+WARNING_EXIT_CODE = exit_codes.WARNING
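A short sketch of how these constants are typically consumed; the build-type and directory values below are illustrative:

```python
from pylib import constants

# Out-directory resolution: CHROMIUM_OUTPUT_DIR wins outright; otherwise
# CHROMIUM_OUT_DIR (default 'out') is joined with the build type.
constants.SetBuildType('Release')
print constants.GetOutDirectory()      # <checkout>/out/Release
constants.SetOutputDirectory('out_x86/Release')
print constants.GetOutDirectory()      # <checkout>/out_x86/Release

# PackageInfo lookups centralize per-build package metadata.
info = constants.PACKAGE_INFO['chromium']
print info.package       # org.chromium.chrome
print info.cmdline_file  # /data/local/chrome-command-line
```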
diff --git a/build/android/pylib/constants/host_paths.py b/build/android/pylib/constants/host_paths.py
new file mode 100644
index 0000000..98aa53d
--- /dev/null
+++ b/build/android/pylib/constants/host_paths.py
@@ -0,0 +1,38 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import os
+import sys
+
+DIR_SOURCE_ROOT = os.environ.get(
+    'CHECKOUT_SOURCE_ROOT',
+    os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                 os.pardir, os.pardir, os.pardir, os.pardir)))
+
+BUILD_COMMON_PATH = os.path.join(
+    DIR_SOURCE_ROOT, 'build', 'util', 'lib', 'common')
+
+# third-party libraries
+ANDROID_PLATFORM_DEVELOPMENT_SCRIPTS_PATH = os.path.join(
+    DIR_SOURCE_ROOT, 'third_party', 'android_platform', 'development',
+    'scripts')
+DEVIL_PATH = os.path.join(
+    DIR_SOURCE_ROOT, 'third_party', 'catapult', 'devil')
+PYMOCK_PATH = os.path.join(
+    DIR_SOURCE_ROOT, 'third_party', 'pymock')
+
+@contextlib.contextmanager
+def SysPath(path, position=None):
+  if position is None:
+    sys.path.append(path)
+  else:
+    sys.path.insert(position, path)
+  try:
+    yield
+  finally:
+    if sys.path[-1] == path:
+      sys.path.pop()
+    else:
+      sys.path.remove(path)
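SysPath is the intended way to make the third-party paths above importable without permanently polluting sys.path; a usage sketch:

```python
from pylib.constants import host_paths

# Append for ordinary imports; sys.path is restored when the block exits,
# even if the import raises.
with host_paths.SysPath(host_paths.PYMOCK_PATH):
  import mock  # pylint: disable=import-error

# Prepend (position=0) when the checked-in copy must shadow any installed one.
with host_paths.SysPath(host_paths.DEVIL_PATH, position=0):
  from devil.android import device_utils  # pylint: disable=import-error
```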
diff --git a/build/android/pylib/content_settings.py b/build/android/pylib/content_settings.py
new file mode 100644
index 0000000..3bf11bc
--- /dev/null
+++ b/build/android/pylib/content_settings.py
@@ -0,0 +1,80 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class ContentSettings(dict):
+
+  """A dict interface to interact with device content settings.
+
+  System properties are key/value pairs as exposed by adb shell content.
+  """
+
+  def __init__(self, table, device):
+    super(ContentSettings, self).__init__()
+    self._table = table
+    self._device = device
+
+  @staticmethod
+  def _GetTypeBinding(value):
+    if isinstance(value, bool):
+      return 'b'
+    if isinstance(value, float):
+      return 'f'
+    if isinstance(value, int):
+      return 'i'
+    if isinstance(value, long):
+      return 'l'
+    if isinstance(value, str):
+      return 's'
+    raise ValueError('Unsupported type %s' % type(value))
+
+  def __contains__(self, key):
+    # The parent dict is never populated, so query the device instead; this
+    # keeps `key in settings` and the update-vs-insert check in __setitem__
+    # correct.
+    return any(k == key for k, _ in self.iteritems())
+
+  def iteritems(self):
+    # Example row:
+    # 'Row: 0 _id=13, name=logging_id2, value=-1fccbaa546705b05'
+    for row in self._device.RunShellCommand(
+        'content query --uri content://%s' % self._table, as_root=True):
+      fields = row.split(', ')
+      key = None
+      value = None
+      for field in fields:
+        k, _, v = field.partition('=')
+        if k == 'name':
+          key = v
+        elif k == 'value':
+          value = v
+      if not key:
+        continue
+      if not value:
+        value = ''
+      yield key, value
+
+  def __getitem__(self, key):
+    # RunShellCommand returns a list of output lines; join them before
+    # stripping rather than calling strip() on the list itself.
+    output = self._device.RunShellCommand(
+        'content query --uri content://%s --where "name=\'%s\'" '
+        '--projection value' % (self._table, key), as_root=True)
+    return '\n'.join(output).strip()
+
+  def __setitem__(self, key, value):
+    if key in self:
+      self._device.RunShellCommand(
+          'content update --uri content://%s '
+          '--bind value:%s:%s --where "name=\'%s\'"' % (
+              self._table,
+              self._GetTypeBinding(value), value, key),
+          as_root=True)
+    else:
+      self._device.RunShellCommand(
+          'content insert --uri content://%s '
+          '--bind name:%s:%s --bind value:%s:%s' % (
+              self._table,
+              self._GetTypeBinding(key), key,
+              self._GetTypeBinding(value), value),
+          as_root=True)
+
+  def __delitem__(self, key):
+    self._device.RunShellCommand(
+        'content delete --uri content://%s '
+        '--bind name:%s:%s' % (
+            self._table,
+            self._GetTypeBinding(key), key),
+        as_root=True)
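A usage sketch for the dict interface above; `device` is assumed to be a connected devil DeviceUtils instance:

```python
from pylib import content_settings

settings = content_settings.ContentSettings('settings/secure', device)
settings['mock_location'] = 1      # inserts or updates, bound as int ('i')
print settings['mock_location']    # queries the row (raw 'content query' output)
for name, value in settings.iteritems():
  print name, value                # enumerates every row in the table
del settings['mock_location']      # deletes the row
```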
diff --git a/build/android/pylib/device/OWNERS b/build/android/pylib/device/OWNERS
new file mode 100644
index 0000000..c35d7ac
--- /dev/null
+++ b/build/android/pylib/device/OWNERS
@@ -0,0 +1,2 @@
+jbudorick@chromium.org
+perezju@chromium.org
diff --git a/build/android/pylib/device/__init__.py b/build/android/pylib/device/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/build/android/pylib/device/__init__.py
diff --git a/build/android/pylib/device/adb_wrapper.py b/build/android/pylib/device/adb_wrapper.py
new file mode 100644
index 0000000..f66619f
--- /dev/null
+++ b/build/android/pylib/device/adb_wrapper.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.sdk.adb_wrapper import *
diff --git a/build/android/pylib/device/battery_utils.py b/build/android/pylib/device/battery_utils.py
new file mode 100644
index 0000000..95c5613
--- /dev/null
+++ b/build/android/pylib/device/battery_utils.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.battery_utils import *
diff --git a/build/android/pylib/device/commands/BUILD.gn b/build/android/pylib/device/commands/BUILD.gn
new file mode 100644
index 0000000..c6d4b42
--- /dev/null
+++ b/build/android/pylib/device/commands/BUILD.gn
@@ -0,0 +1,17 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/android/rules.gni")
+
+group("commands") {
+  data_deps = [
+    ":chromium_commands",
+  ]
+}
+
+# GYP: //build/android/pylib/device/commands/commands.gyp:chromium_commands
+android_library("chromium_commands") {
+  java_files = [ "java/src/org/chromium/android/commands/unzip/Unzip.java" ]
+  dex_path = "$root_build_dir/lib.java/chromium_commands.dex.jar"
+}
diff --git a/build/android/pylib/device/commands/commands.gyp b/build/android/pylib/device/commands/commands.gyp
new file mode 100644
index 0000000..b5b5bc8
--- /dev/null
+++ b/build/android/pylib/device/commands/commands.gyp
@@ -0,0 +1,20 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'targets': [
+    {
+      # GN version: //build/android/pylib/devices/commands:chromium_commands
+      'target_name': 'chromium_commands',
+      'type': 'none',
+      'variables': {
+        'add_to_dependents_classpaths': 0,
+        'java_in_dir': ['java'],
+      },
+      'includes': [
+        '../../../../../build/java.gypi',
+      ],
+    }
+  ],
+}
diff --git a/build/android/pylib/device/commands/java/src/org/chromium/android/commands/unzip/Unzip.java b/build/android/pylib/device/commands/java/src/org/chromium/android/commands/unzip/Unzip.java
new file mode 100644
index 0000000..7cbbb73
--- /dev/null
+++ b/build/android/pylib/device/commands/java/src/org/chromium/android/commands/unzip/Unzip.java
@@ -0,0 +1,95 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.android.commands.unzip;
+
+import android.util.Log;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipInputStream;
+
+/**
+ *  Minimal implementation of the command-line unzip utility for Android.
+ */
+public class Unzip {
+
+    private static final String TAG = "Unzip";
+
+    public static void main(String[] args) {
+        try {
+            (new Unzip()).run(args);
+        } catch (RuntimeException e) {
+            Log.e(TAG, e.toString());
+            System.exit(1);
+        }
+    }
+
+    private void showUsage(PrintStream s) {
+        s.println("Usage:");
+        s.println("unzip [zipfile]");
+    }
+
+    @SuppressWarnings("Finally")
+    private void unzip(String[] args) {
+        ZipInputStream zis = null;
+        try {
+            String zipfile = args[0];
+            zis = new ZipInputStream(new BufferedInputStream(new FileInputStream(zipfile)));
+            ZipEntry ze = null;
+
+            byte[] bytes = new byte[1024];
+            while ((ze = zis.getNextEntry()) != null) {
+                File outputFile = new File(ze.getName());
+                if (ze.isDirectory()) {
+                    if (!outputFile.exists() && !outputFile.mkdirs()) {
+                        throw new RuntimeException(
+                                "Failed to create directory: " + outputFile.toString());
+                    }
+                } else {
+                    File parentDir = outputFile.getParentFile();
+                    if (!parentDir.exists() && !parentDir.mkdirs()) {
+                        throw new RuntimeException(
+                                "Failed to create directory: " + parentDir.toString());
+                    }
+                    OutputStream out = new BufferedOutputStream(new FileOutputStream(outputFile));
+                    int actualBytes = 0;
+                    while ((actualBytes = zis.read(bytes)) != -1) {
+                        out.write(bytes, 0, actualBytes);
+                    }
+                    out.close();
+                }
+                zis.closeEntry();
+            }
+
+        } catch (IOException e) {
+            throw new RuntimeException("Error while unzipping: " + e.toString());
+        } finally {
+            try {
+                if (zis != null) zis.close();
+            } catch (IOException e) {
+                throw new RuntimeException("Error while closing zip: " + e.toString());
+            }
+        }
+    }
+
+    public void run(String[] args) {
+        if (args.length != 1) {
+            showUsage(System.err);
+            throw new RuntimeException("Incorrect usage.");
+        }
+
+        unzip(args);
+    }
+}
+
diff --git a/build/android/pylib/device/decorators.py b/build/android/pylib/device/decorators.py
new file mode 100644
index 0000000..f8c2fdd
--- /dev/null
+++ b/build/android/pylib/device/decorators.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.decorators import *
diff --git a/build/android/pylib/device/device_blacklist.py b/build/android/pylib/device/device_blacklist.py
new file mode 100644
index 0000000..fad1ca6
--- /dev/null
+++ b/build/android/pylib/device/device_blacklist.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.device_blacklist import *
diff --git a/build/android/pylib/device/device_errors.py b/build/android/pylib/device/device_errors.py
new file mode 100644
index 0000000..cb09c3c
--- /dev/null
+++ b/build/android/pylib/device/device_errors.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.device_errors import *
diff --git a/build/android/pylib/device/device_list.py b/build/android/pylib/device/device_list.py
new file mode 100644
index 0000000..a730277
--- /dev/null
+++ b/build/android/pylib/device/device_list.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.device_list import *
diff --git a/build/android/pylib/device/device_utils.py b/build/android/pylib/device/device_utils.py
new file mode 100644
index 0000000..b8e8de2
--- /dev/null
+++ b/build/android/pylib/device/device_utils.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.device_utils import *
diff --git a/build/android/pylib/device/intent.py b/build/android/pylib/device/intent.py
new file mode 100644
index 0000000..cb6fb68
--- /dev/null
+++ b/build/android/pylib/device/intent.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.sdk.intent import *
diff --git a/build/android/pylib/device/logcat_monitor.py b/build/android/pylib/device/logcat_monitor.py
new file mode 100644
index 0000000..0e492cb
--- /dev/null
+++ b/build/android/pylib/device/logcat_monitor.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.logcat_monitor import *
diff --git a/build/android/pylib/device/shared_prefs.py b/build/android/pylib/device/shared_prefs.py
new file mode 100644
index 0000000..38db76a
--- /dev/null
+++ b/build/android/pylib/device/shared_prefs.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.sdk.shared_prefs import *
diff --git a/build/android/pylib/device_settings.py b/build/android/pylib/device_settings.py
new file mode 100644
index 0000000..ab4ad1b
--- /dev/null
+++ b/build/android/pylib/device_settings.py
@@ -0,0 +1,199 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from pylib import content_settings
+
+_LOCK_SCREEN_SETTINGS_PATH = '/data/system/locksettings.db'
+_ALTERNATE_LOCK_SCREEN_SETTINGS_PATH = (
+    '/data/data/com.android.providers.settings/databases/settings.db')
+PASSWORD_QUALITY_UNSPECIFIED = '0'
+_COMPATIBLE_BUILD_TYPES = ['userdebug', 'eng']
+
+
+def ConfigureContentSettings(device, desired_settings):
+  """Configures device content setings from a list.
+
+  Many settings are documented at:
+    http://developer.android.com/reference/android/provider/Settings.Global.html
+    http://developer.android.com/reference/android/provider/Settings.Secure.html
+    http://developer.android.com/reference/android/provider/Settings.System.html
+
+  Many others are undocumented.
+
+  Args:
+    device: A DeviceUtils instance for the device to configure.
+    desired_settings: A list of (table, [(key, value), ...]) tuples for all
+        settings to configure.
+  """
+  for table, key_value in desired_settings:
+    settings = content_settings.ContentSettings(table, device)
+    for key, value in key_value:
+      settings[key] = value
+    logging.info('\n%s %s', table, (80 - len(table)) * '-')
+    for key, value in sorted(settings.iteritems()):
+      logging.info('\t%s: %s', key, value)
+
+
+def SetLockScreenSettings(device):
+  """Sets lock screen settings on the device.
+
+  On certain device/Android configurations we need to disable the lock screen in
+  a different database. Additionally, the password type must be set to
+  DevicePolicyManager.PASSWORD_QUALITY_UNSPECIFIED.
+  Lock screen settings are stored in sqlite on the device in:
+      /data/system/locksettings.db
+
+  IMPORTANT: The first column is used as a primary key so that all rows with the
+  same value for that column are removed from the table prior to inserting the
+  new values.
+
+  Args:
+    device: A DeviceUtils instance for the device to configure.
+
+  Raises:
+    Exception if the setting was not properly set.
+  """
+  if device.build_type not in _COMPATIBLE_BUILD_TYPES:
+    logging.warning('Unable to disable lockscreen on %s builds.',
+                    device.build_type)
+    return
+
+  def get_lock_settings(table):
+    return [(table, 'lockscreen.disabled', '1'),
+            (table, 'lockscreen.password_type', PASSWORD_QUALITY_UNSPECIFIED),
+            (table, 'lockscreen.password_type_alternate',
+             PASSWORD_QUALITY_UNSPECIFIED)]
+
+  if device.FileExists(_LOCK_SCREEN_SETTINGS_PATH):
+    db = _LOCK_SCREEN_SETTINGS_PATH
+    locksettings = get_lock_settings('locksettings')
+    columns = ['name', 'user', 'value']
+    generate_values = lambda k, v: [k, '0', v]
+  elif device.FileExists(_ALTERNATE_LOCK_SCREEN_SETTINGS_PATH):
+    db = _ALTERNATE_LOCK_SCREEN_SETTINGS_PATH
+    locksettings = get_lock_settings('secure') + get_lock_settings('system')
+    columns = ['name', 'value']
+    generate_values = lambda k, v: [k, v]
+  else:
+    logging.warning('Unable to find database file to set lock screen settings.')
+    return
+
+  for table, key, value in locksettings:
+    # Set the lockscreen setting for default user '0'
+    values = generate_values(key, value)
+
+    cmd = """begin transaction;
+delete from '%(table)s' where %(primary_key)s='%(primary_value)s';
+insert into '%(table)s' (%(columns)s) values (%(values)s);
+commit transaction;""" % {
+      'table': table,
+      'primary_key': columns[0],
+      'primary_value': values[0],
+      'columns': ', '.join(columns),
+      'values': ', '.join(["'%s'" % value for value in values])
+    }
+    output_msg = device.RunShellCommand('sqlite3 %s "%s"' % (db, cmd),
+                                        as_root=True)
+    if output_msg:
+      logging.info(' '.join(output_msg))
+
+
+ENABLE_LOCATION_SETTINGS = [
+  # Note: these settings must be applied in this order for all of them to
+  # take effect and persist across a reboot.
+  ('com.google.settings/partner', [
+    ('use_location_for_services', 1),
+  ]),
+  ('settings/secure', [
+    # Ensure Geolocation is enabled and allowed for tests.
+    ('location_providers_allowed', 'gps,network'),
+  ]),
+  ('com.google.settings/partner', [
+    ('network_location_opt_in', 1),
+  ])
+]
+
+DISABLE_LOCATION_SETTINGS = [
+  ('com.google.settings/partner', [
+    ('use_location_for_services', 0),
+  ]),
+  ('settings/secure', [
+    # Ensure Geolocation is disabled.
+    ('location_providers_allowed', ''),
+  ]),
+]
+
+ENABLE_MOCK_LOCATION_SETTINGS = [
+  ('settings/secure', [
+    ('mock_location', 1),
+  ]),
+]
+
+DISABLE_MOCK_LOCATION_SETTINGS = [
+  ('settings/secure', [
+    ('mock_location', 0),
+  ]),
+]
+
+DETERMINISTIC_DEVICE_SETTINGS = [
+  ('settings/global', [
+    ('assisted_gps_enabled', 0),
+
+    # Disable "auto time" and "auto time zone" to avoid network-provided time
+    # to overwrite the device's datetime and timezone synchronized from host
+    # when running tests later. See b/6569849.
+    ('auto_time', 0),
+    ('auto_time_zone', 0),
+
+    ('development_settings_enabled', 1),
+
+    # Flag for allowing ActivityManagerService to send ACTION_APP_ERROR intents
+    # on application crashes and ANRs. If this is disabled, the crash/ANR dialog
+    # will never display the "Report" button.
+    # Type: int ( 0 = disallow, 1 = allow )
+    ('send_action_app_error', 0),
+
+    ('stay_on_while_plugged_in', 3),
+
+    ('verifier_verify_adb_installs', 0),
+  ]),
+  ('settings/secure', [
+    ('allowed_geolocation_origins',
+        'http://www.google.co.uk http://www.google.com'),
+
+    # Ensure that we never get random dialogs like "Unfortunately the process
+    # android.process.acore has stopped", which steal the focus and make our
+    # automation fail (the dialog mistakenly receives the injected user input
+    # events).
+    ('anr_show_background', 0),
+
+    ('lockscreen.disabled', 1),
+
+    ('screensaver_enabled', 0),
+
+    ('skip_first_use_hints', 1),
+  ]),
+  ('settings/system', [
+    # We don't want devices to accidentally rotate the screen, as that could
+    # affect performance measurements.
+    ('accelerometer_rotation', 0),
+
+    ('lockscreen.disabled', 1),
+
+    # Turn down brightness and disable auto-adjust so that devices run cooler.
+    ('screen_brightness', 5),
+    ('screen_brightness_mode', 0),
+
+    ('user_rotation', 0),
+  ]),
+]
+
+NETWORK_DISABLED_SETTINGS = [
+  ('settings/global', [
+    ('airplane_mode_on', 1),
+    ('wifi_on', 0),
+  ]),
+]
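Putting the setting groups above together, a harness might apply them like this; the serial number is hypothetical:

```python
from devil.android import device_utils
from pylib import device_settings

device = device_utils.DeviceUtils('0123456789abcdef')  # hypothetical serial
device_settings.ConfigureContentSettings(
    device,
    device_settings.DETERMINISTIC_DEVICE_SETTINGS +
    device_settings.ENABLE_MOCK_LOCATION_SETTINGS)
device_settings.SetLockScreenSettings(device)
```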
diff --git a/build/android/pylib/device_signal.py b/build/android/pylib/device_signal.py
new file mode 100644
index 0000000..ca57690
--- /dev/null
+++ b/build/android/pylib/device_signal.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.device_signal import *
diff --git a/build/android/pylib/gtest/__init__.py b/build/android/pylib/gtest/__init__.py
new file mode 100644
index 0000000..727e987
--- /dev/null
+++ b/build/android/pylib/gtest/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
diff --git a/build/android/pylib/gtest/filter/OWNERS b/build/android/pylib/gtest/filter/OWNERS
new file mode 100644
index 0000000..72e8ffc
--- /dev/null
+++ b/build/android/pylib/gtest/filter/OWNERS
@@ -0,0 +1 @@
+*
diff --git a/build/android/pylib/gtest/filter/base_unittests_disabled b/build/android/pylib/gtest/filter/base_unittests_disabled
new file mode 100644
index 0000000..bf2311d
--- /dev/null
+++ b/build/android/pylib/gtest/filter/base_unittests_disabled
@@ -0,0 +1,28 @@
+# List of suppressions
+
+# Android will not support StackTrace.
+StackTrace.*
+#
+# Sometimes this is automatically generated by run_tests.py
+VerifyPathControlledByUserTest.Symlinks
+
+# http://crbug.com/138845
+MessagePumpLibeventTest.TestWatchingFromBadThread
+
+StringPrintfTest.StringPrintfMisc
+StringPrintfTest.StringAppendfString
+StringPrintfTest.StringAppendfInt
+StringPrintfTest.StringPrintfBounds
+ProcessUtilTest.GetAppOutputRestrictedSIGPIPE
+# TODO(jrg): Fails on bots.  Works locally.  Figure out why.  2/6/12
+FieldTrialTest.*
+# Flaky?
+ScopedJavaRefTest.RefCounts
+# Death tests are not supported with apks.
+*DeathTest*
+FileTest.MemoryCorruption
+MessagePumpLibeventTest.QuitOutsideOfRun
+ScopedFD.ScopedFDCrashesOnCloseFailure
+
+# http://crbug.com/245043
+StackContainer.BufferAlignment
diff --git a/build/android/pylib/gtest/filter/base_unittests_emulator_additional_disabled b/build/android/pylib/gtest/filter/base_unittests_emulator_additional_disabled
new file mode 100644
index 0000000..85e8fd6
--- /dev/null
+++ b/build/android/pylib/gtest/filter/base_unittests_emulator_additional_disabled
@@ -0,0 +1,10 @@
+# Additional list of suppressions from the emulator
+#
+# Automatically generated by run_tests.py
+PathServiceTest.Get
+SharedMemoryTest.OpenClose
+StringPrintfTest.StringAppendfInt
+StringPrintfTest.StringAppendfString
+StringPrintfTest.StringPrintfBounds
+StringPrintfTest.StringPrintfMisc
+VerifyPathControlledByUserTest.Symlinks
diff --git a/build/android/pylib/gtest/filter/breakpad_unittests_disabled b/build/android/pylib/gtest/filter/breakpad_unittests_disabled
new file mode 100644
index 0000000..cefc64f
--- /dev/null
+++ b/build/android/pylib/gtest/filter/breakpad_unittests_disabled
@@ -0,0 +1,9 @@
+FileIDStripTest.StripSelf
+# crbug.com/303960
+ExceptionHandlerTest.InstructionPointerMemoryNullPointer
+# crbug.com/171419
+MinidumpWriterTest.MappingInfoContained
+# crbug.com/310088
+MinidumpWriterTest.MinidumpSizeLimit
+# crbug.com/375838
+ElfCoreDumpTest.ValidCoreFile
diff --git a/build/android/pylib/gtest/filter/content_browsertests_disabled b/build/android/pylib/gtest/filter/content_browsertests_disabled
new file mode 100644
index 0000000..4b28a2b
--- /dev/null
+++ b/build/android/pylib/gtest/filter/content_browsertests_disabled
@@ -0,0 +1,57 @@
+# List of suppressions
+# Timeouts
+Http/MediaTest.*
+File/MediaTest.*
+MediaTest.*
+DatabaseTest.*
+
+# Crashes
+RenderFrameHostManagerTest.IgnoreRendererDebugURLsWhenCrashed
+
+# Plugins are not supported.
+BrowserPluginThreadedCompositorPixelTest.*
+BrowserPluginHostTest.*
+BrowserPluginTest.*
+PluginTest.*
+
+# http://crbug.com/463740
+CrossPlatformAccessibilityBrowserTest.SelectedEditableTextAccessibility
+
+# http://crbug.com/297230
+DumpAccessibilityTreeTest.AccessibilityAriaLevel
+DumpAccessibilityTreeTest.AccessibilityAriaProgressbar
+DumpAccessibilityTreeTest.AccessibilityListMarkers
+DumpAccessibilityTreeTest.AccessibilityUl
+DumpAccessibilityTreeTest.AccessibilityCanvas
+RendererAccessibilityTest.DetachAccessibilityObject
+DumpAccessibilityTreeTest.AccessibilityDialog
+DumpAccessibilityTreeTest.AccessibilityModalDialogClosed
+DumpAccessibilityTreeTest.AccessibilityModalDialogInIframeOpened
+RendererAccessibilityTest.EventOnObjectNotInTree
+
+# http://crbug.com/187500
+RenderViewImplTest.ImeComposition
+RenderViewImplTest.InsertCharacters
+RenderViewImplTest.OnHandleKeyboardEvent
+RenderViewImplTest.OnNavStateChanged
+# ZoomLevel is not used on Android
+RenderViewImplTest.ZoomLimit
+RendererAccessibilityTest.SendFullAccessibilityTreeOnReload
+RendererAccessibilityTest.HideAccessibilityObject
+RendererAccessibilityTest.ShowAccessibilityObject
+RendererAccessibilityTest.TextSelectionShouldSendRoot
+
+# http://crbug.com/386227
+IndexedDBBrowserTest.VersionChangeCrashResilience
+
+# http://crbug.com/233118
+IndexedDBBrowserTest.NullKeyPathPersistence
+
+# http://crbug.com/342525
+IndexedDBBrowserTestSingleProcess.RenderThreadShutdownTest
+
+# http://crbug.com/338421
+GinBrowserTest.GinAndGarbageCollection
+
+# http://crbug.com/343604
+MSE_ClearKey/EncryptedMediaTest.ConfigChangeVideo/0
diff --git a/build/android/pylib/gtest/filter/unit_tests_disabled b/build/android/pylib/gtest/filter/unit_tests_disabled
new file mode 100644
index 0000000..51e1930
--- /dev/null
+++ b/build/android/pylib/gtest/filter/unit_tests_disabled
@@ -0,0 +1,118 @@
+# List of suppressions
+
+# The UDP-related tests currently do not work on Android because
+# we lack a UDP forwarder tool.
+NetworkStatsTestUDP.*
+
+# Missing test resource of 16MB.
+HistoryProfileTest.TypicalProfileVersion
+
+# crbug.com/139408
+SQLitePersistentCookieStoreTest.TestDontLoadOldSessionCookies
+SQLitePersistentCookieStoreTest.PersistIsPersistent
+
+# crbug.com/139433
+AutofillTableTest.AutofillProfile*
+AutofillTableTest.UpdateAutofillProfile
+
+# crbug.com/139400
+AutofillProfileTest.*
+CreditCardTest.SetInfoExpirationMonth
+
+# crbug.com/139398
+DownloadItemModelTest.InterruptTooltip
+
+# Tests crashing in the APK
+# l10n_util.cc(655)] Check failed: std::string::npos != pos
+DownloadItemModelTest.InterruptStatus
+# l10n_util.cc(655)] Check failed: std::string::npos != pos
+WebsiteSettingsTest.OnSiteDataAccessed
+
+# crbug.com/139423
+ValueStoreFrontendTest.GetExistingData
+
+# crbug.com/139421
+ChromeSelectFilePolicyTest.ExpectAsynchronousListenerCall
+
+# http://crbug.com/139033
+ChromeDownloadManagerDelegateTest.StartDownload_PromptAlways
+
+# Extension support is limited on Android.
+# Some of these can be enabled if we register extension related prefs in
+# browser_prefs.cc
+ExtensionTest.*
+ExtensionAPI.*
+ExtensionFileUtilTest.*
+ExtensionPermissionsTest.*
+ExtensionUnpackerTest.*
+ActiveTabTest.*
+ExtensionAppsPromo.*
+ComponentLoaderTest.*
+ExtensionFromUserScript.*
+ExtensionFromWebApp.*
+ExtensionIconManagerTest.*
+ExtensionServiceTest.*
+ExtensionServiceTestSimple.*
+ExtensionSourcePriorityTest.*
+ExtensionSpecialStoragePolicyTest.*
+ExternalPolicyProviderTest.*
+ExternalProviderImplTest.*
+MenuManagerTest.*
+PageActionControllerTest.*
+PermissionsUpdaterTest.*
+ImageLoaderTest.*
+ImageLoadingTrackerTest.*
+ExtensionSettingsFrontendTest.*
+ExtensionSettingsSyncTest.*
+ExtensionUpdaterTest.*
+UserScriptListenerTest.*
+WebApplicationTest.GetShortcutInfoForTab
+ExtensionActionIconFactoryTest.*
+
+# crbug.com/139411
+AutocompleteProviderTest.*
+HistoryContentsProviderBodyOnlyTest.*
+HistoryContentsProviderTest.*
+HQPOrderingTest.*
+SearchProviderTest.*
+
+ProtocolHandlerRegistryTest.TestOSRegistrationFailure
+
+# crbug.com/139418
+SQLiteServerBoundCertStoreTest.TestUpgradeV1
+SQLiteServerBoundCertStoreTest.TestUpgradeV2
+
+ProfileSyncComponentsFactoryImplTest.*
+PermissionsTest.GetWarningMessages_Plugins
+ImageOperations.ResizeShouldAverageColors
+
+# crbug.com/138275
+PrerenderTest.*
+
+# crbug.com/139643
+VariationsUtilTest.DisableAfterInitialization
+VariationsUtilTest.AssociateGoogleVariationID
+VariationsUtilTest.NoAssociation
+
+# crbug.com/141473
+AutofillManagerTest.UpdatePasswordSyncState
+AutofillManagerTest.UpdatePasswordGenerationState
+
+# crbug.com/144227
+ExtensionIconImageTest.*
+
+# crbug.com/145843
+EntropyProviderTest.UseOneTimeRandomizationSHA1
+EntropyProviderTest.UseOneTimeRandomizationPermuted
+
+# crbug.com/147500
+ManifestTest.RestrictedKeys
+
+# crbug.com/152599
+SyncSearchEngineDataTypeControllerTest.*
+
+# crbug.com/256259
+DiagnosticsModelTest.RunAll
+
+# Death tests are not supported with apks.
+*DeathTest*
diff --git a/build/android/pylib/gtest/gtest_config.py b/build/android/pylib/gtest/gtest_config.py
new file mode 100644
index 0000000..c80ba7f
--- /dev/null
+++ b/build/android/pylib/gtest/gtest_config.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Configuration file for android gtest suites."""
+
+# Add new suites here before upgrading them to the stable list below.
+EXPERIMENTAL_TEST_SUITES = [
+    'components_browsertests',
+    'heap_profiler_unittests',
+    'devtools_bridge_tests',
+]
+
+TELEMETRY_EXPERIMENTAL_TEST_SUITES = [
+    'telemetry_unittests',
+]
+
+# Do not modify this list without approval of an Android owner. It determines
+# which suites are run by default, both for local testing and on Android
+# trybots running on the commit queue.
+STABLE_TEST_SUITES = [
+    'android_webview_unittests',
+    'base_unittests',
+    'breakpad_unittests',
+    'cc_unittests',
+    'components_unittests',
+    'content_browsertests',
+    'content_unittests',
+    'events_unittests',
+    'gl_tests',
+    'gl_unittests',
+    'gpu_unittests',
+    'ipc_tests',
+    'media_unittests',
+    'midi_unittests',
+    'net_unittests',
+    'sandbox_linux_unittests',
+    'skia_unittests',
+    'sql_unittests',
+    'sync_unit_tests',
+    'ui_android_unittests',
+    'ui_base_unittests',
+    'ui_touch_selection_unittests',
+    'unit_tests_apk',
+    'webkit_unit_tests',
+]
+
+# Tests fail in component=shared_library build, which is required for ASan.
+# http://crbug.com/344868
+ASAN_EXCLUDED_TEST_SUITES = [
+    'breakpad_unittests',
+    'sandbox_linux_unittests',
+
+    # The internal ASan recipe cannot run the "unit_tests_apk" step; it is the
+    # only internal recipe affected. See http://crbug.com/607850.
+    'unit_tests_apk',
+]
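A sketch of how a runner might combine these lists; the helper name is an assumption, not actual recipe code:

```python
from pylib.gtest import gtest_config

def default_suites(asan=False, experimental=False):
  # Start from the stable set, optionally widen, then drop ASan exclusions.
  suites = list(gtest_config.STABLE_TEST_SUITES)
  if experimental:
    suites += gtest_config.EXPERIMENTAL_TEST_SUITES
  if asan:
    suites = [s for s in suites
              if s not in gtest_config.ASAN_EXCLUDED_TEST_SUITES]
  return suites
```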
diff --git a/build/android/pylib/gtest/gtest_test_instance.py b/build/android/pylib/gtest/gtest_test_instance.py
new file mode 100644
index 0000000..282b81d
--- /dev/null
+++ b/build/android/pylib/gtest/gtest_test_instance.py
@@ -0,0 +1,407 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import re
+import tempfile
+
+from devil.android import apk_helper
+from pylib import constants
+from pylib.constants import host_paths
+from pylib.base import base_test_result
+from pylib.base import test_instance
+from pylib.utils import isolator
+
+with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
+  import unittest_util # pylint: disable=import-error
+
+
+BROWSER_TEST_SUITES = [
+  'components_browsertests',
+  'content_browsertests',
+]
+
+RUN_IN_SUB_THREAD_TEST_SUITES = ['net_unittests']
+
+
+_DEFAULT_ISOLATE_FILE_PATHS = {
+    'base_unittests': 'base/base_unittests.isolate',
+    'blink_heap_unittests':
+      'third_party/WebKit/Source/platform/heap/BlinkHeapUnitTests.isolate',
+    'blink_platform_unittests':
+      'third_party/WebKit/Source/platform/blink_platform_unittests.isolate',
+    'cc_perftests': 'cc/cc_perftests.isolate',
+    'components_browsertests': 'components/components_browsertests.isolate',
+    'components_unittests': 'components/components_unittests.isolate',
+    'content_browsertests': 'content/content_browsertests.isolate',
+    'content_unittests': 'content/content_unittests.isolate',
+    'media_perftests': 'media/media_perftests.isolate',
+    'media_unittests': 'media/media_unittests.isolate',
+    'midi_unittests': 'media/midi/midi_unittests.isolate',
+    'net_unittests': 'net/net_unittests.isolate',
+    'sql_unittests': 'sql/sql_unittests.isolate',
+    'sync_unit_tests': 'sync/sync_unit_tests.isolate',
+    'ui_base_unittests': 'ui/base/ui_base_tests.isolate',
+    'unit_tests': 'chrome/unit_tests.isolate',
+    'webkit_unit_tests':
+      'third_party/WebKit/Source/web/WebKitUnitTests.isolate',
+}
+
+
+# Used for filtering large data deps at a finer grain than what's allowed in
+# isolate files, since pushing deps to devices is expensive.
+# Wildcards are allowed.
+_DEPS_EXCLUSION_LIST = [
+    'chrome/test/data/extensions/api_test',
+    'chrome/test/data/extensions/secure_shell',
+    'chrome/test/data/firefox*',
+    'chrome/test/data/gpu',
+    'chrome/test/data/image_decoding',
+    'chrome/test/data/import',
+    'chrome/test/data/page_cycler',
+    'chrome/test/data/perf',
+    'chrome/test/data/pyauto_private',
+    'chrome/test/data/safari_import',
+    'chrome/test/data/scroll',
+    'chrome/test/data/third_party',
+    'third_party/hunspell_dictionaries/*.dic',
+    # crbug.com/258690
+    'webkit/data/bmp_decoder',
+    'webkit/data/ico_decoder',
+]
+
+
+_EXTRA_NATIVE_TEST_ACTIVITY = (
+    'org.chromium.native_test.NativeTestInstrumentationTestRunner.'
+        'NativeTestActivity')
+_EXTRA_RUN_IN_SUB_THREAD = (
+    'org.chromium.native_test.NativeTestActivity.RunInSubThread')
+EXTRA_SHARD_NANO_TIMEOUT = (
+    'org.chromium.native_test.NativeTestInstrumentationTestRunner.'
+        'ShardNanoTimeout')
+_EXTRA_SHARD_SIZE_LIMIT = (
+    'org.chromium.native_test.NativeTestInstrumentationTestRunner.'
+        'ShardSizeLimit')
+
+# TODO(jbudorick): Remove these once we're no longer parsing stdout to generate
+# results.
+_RE_TEST_STATUS = re.compile(
+    r'\[ +((?:RUN)|(?:FAILED)|(?:OK)|(?:CRASHED)) +\]'
+    r' ?([^ ]+)?(?: \((\d+) ms\))?$')
+_RE_TEST_RUN_STATUS = re.compile(
+    r'\[ +(PASSED|RUNNER_FAILED|CRASHED) \] ?[^ ]+')
+# Crash detection constants.
+_RE_TEST_ERROR = re.compile(r'FAILURES!!! Tests run: \d+,'
+                                    r' Failures: \d+, Errors: 1')
+_RE_TEST_CURRENTLY_RUNNING = re.compile(r'\[ERROR:.*?\]'
+                                    r' Currently running: (.*)')
+
+# TODO(jbudorick): Make this a class method of GtestTestInstance once
+# test_package_apk and test_package_exe are gone.
+def ParseGTestListTests(raw_list):
+  """Parses a raw test list as provided by --gtest_list_tests.
+
+  Args:
+    raw_list: The raw test listing with the following format:
+
+    IPCChannelTest.
+      SendMessageInChannelConnected
+    IPCSyncChannelTest.
+      Simple
+      DISABLED_SendWithTimeoutMixedOKAndTimeout
+
+  Returns:
+    A list of all tests. For the above raw listing:
+
+    [IPCChannelTest.SendMessageInChannelConnected, IPCSyncChannelTest.Simple,
+     IPCSyncChannelTest.DISABLED_SendWithTimeoutMixedOKAndTimeout]
+  """
+  ret = []
+  current = ''
+  for test in raw_list:
+    if not test:
+      continue
+    if test[0] != ' ':
+      test_case = test.split()[0]
+      if test_case.endswith('.'):
+        current = test_case
+    elif 'YOU HAVE' not in test:
+      test_name = test.split()[0]
+      ret += [current + test_name]
+  return ret
+
+
+class GtestTestInstance(test_instance.TestInstance):
+
+  def __init__(self, args, isolate_delegate, error_func):
+    super(GtestTestInstance, self).__init__()
+    # TODO(jbudorick): Support multiple test suites.
+    if len(args.suite_name) > 1:
+      raise ValueError('Platform mode currently supports only 1 gtest suite')
+    self._extract_test_list_from_filter = args.extract_test_list_from_filter
+    self._shard_timeout = args.shard_timeout
+    self._suite = args.suite_name[0]
+    self._exe_dist_dir = None
+
+    # GYP:
+    if args.executable_dist_dir:
+      self._exe_dist_dir = os.path.abspath(args.executable_dist_dir)
+    else:
+      # TODO(agrieve): Remove auto-detection once recipes pass flag explicitly.
+      exe_dist_dir = os.path.join(constants.GetOutDirectory(),
+                                  '%s__dist' % self._suite)
+
+      if os.path.exists(exe_dist_dir):
+        self._exe_dist_dir = exe_dist_dir
+
+    incremental_part = ''
+    if args.test_apk_incremental_install_script:
+      incremental_part = '_incremental'
+
+    apk_path = os.path.join(
+        constants.GetOutDirectory(), '%s_apk' % self._suite,
+        '%s-debug%s.apk' % (self._suite, incremental_part))
+    self._test_apk_incremental_install_script = (
+        args.test_apk_incremental_install_script)
+    if not os.path.exists(apk_path):
+      self._apk_helper = None
+    else:
+      self._apk_helper = apk_helper.ApkHelper(apk_path)
+      self._extras = {
+          _EXTRA_NATIVE_TEST_ACTIVITY: self._apk_helper.GetActivityName(),
+      }
+      if self._suite in RUN_IN_SUB_THREAD_TEST_SUITES:
+        self._extras[_EXTRA_RUN_IN_SUB_THREAD] = 1
+      if self._suite in BROWSER_TEST_SUITES:
+        self._extras[_EXTRA_SHARD_SIZE_LIMIT] = 1
+        self._extras[EXTRA_SHARD_NANO_TIMEOUT] = int(1e9 * self._shard_timeout)
+        self._shard_timeout = 900
+
+    if not self._apk_helper and not self._exe_dist_dir:
+      error_func('Could not find apk or executable for %s' % self._suite)
+
+    self._data_deps = []
+    if args.test_filter:
+      self._gtest_filter = args.test_filter
+    elif args.test_filter_file:
+      with open(args.test_filter_file, 'r') as f:
+        self._gtest_filter = ':'.join(l.strip() for l in f)
+    else:
+      self._gtest_filter = None
+
+    if not args.isolate_file_path:
+      default_isolate_file_path = _DEFAULT_ISOLATE_FILE_PATHS.get(self._suite)
+      if default_isolate_file_path:
+        args.isolate_file_path = os.path.join(
+            host_paths.DIR_SOURCE_ROOT, default_isolate_file_path)
+
+    if (args.isolate_file_path and
+        not isolator.IsIsolateEmpty(args.isolate_file_path)):
+      self._isolate_abs_path = os.path.abspath(args.isolate_file_path)
+      self._isolate_delegate = isolate_delegate
+      self._isolated_abs_path = os.path.join(
+          constants.GetOutDirectory(), '%s.isolated' % self._suite)
+    else:
+      logging.warning('No isolate file provided. No data deps will be pushed.')
+      self._isolate_delegate = None
+
+    if args.app_data_files:
+      self._app_data_files = args.app_data_files
+      if args.app_data_file_dir:
+        self._app_data_file_dir = args.app_data_file_dir
+      else:
+        self._app_data_file_dir = tempfile.mkdtemp()
+        logging.critical('Saving app files to %s', self._app_data_file_dir)
+    else:
+      self._app_data_files = None
+      self._app_data_file_dir = None
+
+    self._test_arguments = args.test_arguments
+
+  @property
+  def activity(self):
+    return self._apk_helper and self._apk_helper.GetActivityName()
+
+  @property
+  def apk(self):
+    return self._apk_helper and self._apk_helper.path
+
+  @property
+  def apk_helper(self):
+    return self._apk_helper
+
+  @property
+  def app_file_dir(self):
+    return self._app_data_file_dir
+
+  @property
+  def app_files(self):
+    return self._app_data_files
+
+  @property
+  def exe_dist_dir(self):
+    return self._exe_dist_dir
+
+  @property
+  def extras(self):
+    return self._extras
+
+  @property
+  def gtest_filter(self):
+    return self._gtest_filter
+
+  @property
+  def package(self):
+    return self._apk_helper and self._apk_helper.GetPackageName()
+
+  @property
+  def permissions(self):
+    return self._apk_helper and self._apk_helper.GetPermissions()
+
+  @property
+  def runner(self):
+    return self._apk_helper and self._apk_helper.GetInstrumentationName()
+
+  @property
+  def shard_timeout(self):
+    return self._shard_timeout
+
+  @property
+  def suite(self):
+    return self._suite
+
+  @property
+  def test_apk_incremental_install_script(self):
+    return self._test_apk_incremental_install_script
+
+  @property
+  def test_arguments(self):
+    return self._test_arguments
+
+  @property
+  def extract_test_list_from_filter(self):
+    return self._extract_test_list_from_filter
+
+  #override
+  def TestType(self):
+    return 'gtest'
+
+  #override
+  def SetUp(self):
+    """Map data dependencies via isolate."""
+    if self._isolate_delegate:
+      self._isolate_delegate.Remap(
+          self._isolate_abs_path, self._isolated_abs_path)
+      self._isolate_delegate.PurgeExcluded(_DEPS_EXCLUSION_LIST)
+      self._isolate_delegate.MoveOutputDeps()
+      dest_dir = None
+      self._data_deps.extend([
+          (self._isolate_delegate.isolate_deps_dir, dest_dir)])
+
+
+  def GetDataDependencies(self):
+    """Returns the test suite's data dependencies.
+
+    Returns:
+      A list of (host_path, device_path) tuples to push. If device_path is
+      None, the client is responsible for determining where to push the file.
+    """
+    return self._data_deps
+
+  def FilterTests(self, test_list, disabled_prefixes=None):
+    """Filters |test_list| based on prefixes and, if present, a filter string.
+
+    Args:
+      test_list: The list of tests to filter.
+      disabled_prefixes: A list of test prefixes to filter. Defaults to
+        DISABLED_, FLAKY_, FAILS_, PRE_, and MANUAL_
+    Returns:
+      A filtered list of tests to run.
+    """
+    gtest_filter_strings = [
+        self._GenerateDisabledFilterString(disabled_prefixes)]
+    if self._gtest_filter:
+      gtest_filter_strings.append(self._gtest_filter)
+
+    filtered_test_list = test_list
+    for gtest_filter_string in gtest_filter_strings:
+      logging.debug('Filtering tests using: %s', gtest_filter_string)
+      filtered_test_list = unittest_util.FilterTestNames(
+          filtered_test_list, gtest_filter_string)
+    return filtered_test_list
+
+  def _GenerateDisabledFilterString(self, disabled_prefixes):
+    disabled_filter_items = []
+
+    if disabled_prefixes is None:
+      disabled_prefixes = ['DISABLED_', 'FLAKY_', 'FAILS_', 'PRE_', 'MANUAL_']
+    disabled_filter_items += ['%s*' % dp for dp in disabled_prefixes]
+    disabled_filter_items += ['*.%s*' % dp for dp in disabled_prefixes]
+
+    disabled_tests_file_path = os.path.join(
+        host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'pylib', 'gtest',
+        'filter', '%s_disabled' % self._suite)
+    if os.path.exists(disabled_tests_file_path):
+      with open(disabled_tests_file_path) as disabled_tests_file:
+        disabled_filter_items += [
+            l for l in (line.strip() for line in disabled_tests_file)
+            if l and not l.startswith('#')]
+
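+    # With the default prefixes this produces a negative filter such as
+    # '*-DISABLED_*:FLAKY_*:...:*.DISABLED_*:*.FLAKY_*:...' (illustrative),
+    # plus any entries read from the per-suite _disabled file.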
+    return '*-%s' % ':'.join(disabled_filter_items)
+
+  # pylint: disable=no-self-use
+  def ParseGTestOutput(self, output):
+    """Parses raw gtest output and returns a list of results.
+
+    Args:
+      output: A list of output lines.
+    Returns:
+      A list of base_test_result.BaseTestResults.
+    """
+    log = []
+    result_type = None
+    results = []
+    test_name = None
+    for l in output:
+      logging.info(l)
+      matcher = _RE_TEST_STATUS.match(l)
+      if matcher:
+        # The test name and status might not appear on the same line.
+        test_name = matcher.group(2) if matcher.group(2) else test_name
+        duration = int(matcher.group(3)) if matcher.group(3) else 0
+        if matcher.group(1) == 'RUN':
+          log = []
+        elif matcher.group(1) == 'OK':
+          result_type = base_test_result.ResultType.PASS
+        elif matcher.group(1) == 'FAILED':
+          result_type = base_test_result.ResultType.FAIL
+        elif matcher.group(1) == 'CRASHED':
+          result_type = base_test_result.ResultType.CRASH
+
+      # A second matcher is needed to catch crashes, e.g. DCHECK failures,
+      # which report the currently running test rather than a status line.
+      matcher = _RE_TEST_CURRENTLY_RUNNING.match(l)
+      if matcher:
+        test_name = matcher.group(1)
+        result_type = base_test_result.ResultType.CRASH
+        duration = 0  # Duration is unknown for crashes.
+
+      if log is not None:
+        log.append(l)
+
+      if result_type:
+        results.append(base_test_result.BaseTestResult(
+            test_name, result_type, duration,
+            log=('\n'.join(log) if log else '')))
+        log = None
+        result_type = None
+
+    return results
+
+  #override
+  def TearDown(self):
+    """Clear the mappings created by SetUp."""
+    if self._isolate_delegate:
+      self._isolate_delegate.Clear()
+
diff --git a/build/android/pylib/gtest/gtest_test_instance_test.py b/build/android/pylib/gtest/gtest_test_instance_test.py
new file mode 100755
index 0000000..c52b235
--- /dev/null
+++ b/build/android/pylib/gtest/gtest_test_instance_test.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from pylib.gtest import gtest_test_instance
+
+
+class GtestTestInstanceTests(unittest.TestCase):
+
+  def testParseGTestListTests_simple(self):
+    raw_output = [
+      'TestCaseOne.',
+      '  testOne',
+      '  testTwo',
+      'TestCaseTwo.',
+      '  testThree',
+      '  testFour',
+    ]
+    actual = gtest_test_instance.ParseGTestListTests(raw_output)
+    expected = [
+      'TestCaseOne.testOne',
+      'TestCaseOne.testTwo',
+      'TestCaseTwo.testThree',
+      'TestCaseTwo.testFour',
+    ]
+    self.assertEqual(expected, actual)
+
+  def testParseGTestListTests_typeParameterized_old(self):
+    raw_output = [
+      'TPTestCase/WithTypeParam/0.',
+      '  testOne',
+      '  testTwo',
+    ]
+    actual = gtest_test_instance.ParseGTestListTests(raw_output)
+    expected = [
+      'TPTestCase/WithTypeParam/0.testOne',
+      'TPTestCase/WithTypeParam/0.testTwo',
+    ]
+    self.assertEqual(expected, actual)
+
+  def testParseGTestListTests_typeParameterized_new(self):
+    raw_output = [
+      'TPTestCase/WithTypeParam/0.  # TypeParam = TypeParam0',
+      '  testOne',
+      '  testTwo',
+    ]
+    actual = gtest_test_instance.ParseGTestListTests(raw_output)
+    expected = [
+      'TPTestCase/WithTypeParam/0.testOne',
+      'TPTestCase/WithTypeParam/0.testTwo',
+    ]
+    self.assertEqual(expected, actual)
+
+  def testParseGTestListTests_valueParameterized_old(self):
+    raw_output = [
+      'VPTestCase.',
+      '  testWithValueParam/0',
+      '  testWithValueParam/1',
+    ]
+    actual = gtest_test_instance.ParseGTestListTests(raw_output)
+    expected = [
+      'VPTestCase.testWithValueParam/0',
+      'VPTestCase.testWithValueParam/1',
+    ]
+    self.assertEqual(expected, actual)
+
+  def testParseGTestListTests_valueParameterized_new(self):
+    raw_output = [
+      'VPTestCase.',
+      '  testWithValueParam/0  # GetParam() = 0',
+      '  testWithValueParam/1  # GetParam() = 1',
+    ]
+    actual = gtest_test_instance.ParseGTestListTests(raw_output)
+    expected = [
+      'VPTestCase.testWithValueParam/0',
+      'VPTestCase.testWithValueParam/1',
+    ]
+    self.assertEqual(expected, actual)
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
+
diff --git a/build/android/pylib/instrumentation/__init__.py b/build/android/pylib/instrumentation/__init__.py
new file mode 100644
index 0000000..727e987
--- /dev/null
+++ b/build/android/pylib/instrumentation/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
diff --git a/build/android/pylib/instrumentation/instrumentation_parser.py b/build/android/pylib/instrumentation/instrumentation_parser.py
new file mode 100644
index 0000000..efd5efb
--- /dev/null
+++ b/build/android/pylib/instrumentation/instrumentation_parser.py
@@ -0,0 +1,96 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import re
+
+# http://developer.android.com/reference/android/test/InstrumentationTestRunner.html
+STATUS_CODE_START = 1
+STATUS_CODE_OK = 0
+STATUS_CODE_ERROR = -1
+STATUS_CODE_FAILURE = -2
+
+# http://developer.android.com/reference/android/app/Activity.html
+RESULT_CODE_OK = -1
+RESULT_CODE_CANCELED = 0
+
+_INSTR_LINE_RE = re.compile(r'^\s*INSTRUMENTATION_([A-Z_]+): (.*)$')
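+# Example lines matched by _INSTR_LINE_RE (values illustrative):
+#   'INSTRUMENTATION_STATUS: class=org.chromium.FooTest'
+#   'INSTRUMENTATION_STATUS_CODE: 1'
+#   'INSTRUMENTATION_CODE: -1'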
+
+
+class InstrumentationParser(object):
+
+  def __init__(self, stream):
+    """An incremental parser for the output of Android instrumentation tests.
+
+    Example:
+
+      stream = adb.IterShell('am instrument -r ...')
+      parser = InstrumentationParser(stream)
+
+      for code, bundle in parser.IterStatus():
+        # do something with each instrumentation status
+        print 'status:', code, bundle
+
+      # do something with the final instrumentation result
+      code, bundle = parser.GetResult()
+      print 'result:', code, bundle
+
+    Args:
+      stream: a sequence of lines as produced by the raw output of an
+        instrumentation test (e.g. by |am instrument -r|).
+    """
+    self._stream = stream
+    self._code = None
+    self._bundle = None
+
+  def IterStatus(self):
+    """Iterate over statuses as they are produced by the instrumentation test.
+
+    Yields:
+      A tuple (code, bundle) for each instrumentation status found in the
+      output.
+    """
+    def join_bundle_values(bundle):
+      for key in bundle:
+        bundle[key] = '\n'.join(bundle[key])
+      return bundle
+
+    bundle = {'STATUS': {}, 'RESULT': {}}
+    header = None
+    key = None
+    for line in self._stream:
+      m = _INSTR_LINE_RE.match(line)
+      if m:
+        header, value = m.groups()
+        key = None
+        if header in ['STATUS', 'RESULT'] and '=' in value:
+          key, value = value.split('=', 1)
+          bundle[header][key] = [value]
+        elif header == 'STATUS_CODE':
+          yield int(value), join_bundle_values(bundle['STATUS'])
+          bundle['STATUS'] = {}
+        elif header == 'CODE':
+          self._code = int(value)
+        else:
+          logging.warning('Unknown INSTRUMENTATION_%s line: %s', header, value)
+      elif key is not None:
+        bundle[header][key].append(line)
+
+    self._bundle = join_bundle_values(bundle['RESULT'])
+
+  def GetResult(self):
+    """Return the final instrumentation result.
+
+    Returns:
+      A pair (code, bundle) with the final instrumentation result. The |code|
+      may be None if no instrumentation result was found in the output.
+
+    Raises:
+      AssertionError: if the instrumentation result is requested before
+        |IterStatus| has been exhausted.
+    """
+    assert self._bundle is not None, (
+        'The IterStatus generator must be exhausted before reading the final'
+        ' instrumentation result.')
+    return self._code, self._bundle
diff --git a/build/android/pylib/instrumentation/instrumentation_parser_test.py b/build/android/pylib/instrumentation/instrumentation_parser_test.py
new file mode 100755
index 0000000..092d10f
--- /dev/null
+++ b/build/android/pylib/instrumentation/instrumentation_parser_test.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""Unit tests for instrumentation.InstrumentationParser."""
+
+import unittest
+
+from pylib.instrumentation import instrumentation_parser
+
+
+class InstrumentationParserTest(unittest.TestCase):
+
+  def testInstrumentationParser_nothing(self):
+    parser = instrumentation_parser.InstrumentationParser([''])
+    statuses = list(parser.IterStatus())
+    code, bundle = parser.GetResult()
+    self.assertEqual(None, code)
+    self.assertEqual({}, bundle)
+    self.assertEqual([], statuses)
+
+  def testInstrumentationParser_noMatchingStarts(self):
+    raw_output = [
+      '',
+      'this.is.a.test.package.TestClass:.',
+      'Test result for =.',
+      'Time: 1.234',
+      '',
+      'OK (1 test)',
+    ]
+
+    parser = instrumentation_parser.InstrumentationParser(raw_output)
+    statuses = list(parser.IterStatus())
+    code, bundle = parser.GetResult()
+    self.assertEqual(None, code)
+    self.assertEqual({}, bundle)
+    self.assertEqual([], statuses)
+
+  def testInstrumentationParser_resultAndCode(self):
+    raw_output = [
+      'INSTRUMENTATION_RESULT: shortMsg=foo bar',
+      'INSTRUMENTATION_RESULT: longMsg=a foo',
+      'walked into',
+      'a bar',
+      'INSTRUMENTATION_CODE: -1',
+    ]
+
+    parser = instrumentation_parser.InstrumentationParser(raw_output)
+    statuses = list(parser.IterStatus())
+    code, bundle = parser.GetResult()
+    self.assertEqual(-1, code)
+    self.assertEqual(
+        {'shortMsg': 'foo bar', 'longMsg': 'a foo\nwalked into\na bar'}, bundle)
+    self.assertEqual([], statuses)
+
+  def testInstrumentationParser_oneStatus(self):
+    raw_output = [
+      'INSTRUMENTATION_STATUS: foo=1',
+      'INSTRUMENTATION_STATUS: bar=hello',
+      'INSTRUMENTATION_STATUS: world=false',
+      'INSTRUMENTATION_STATUS: class=this.is.a.test.package.TestClass',
+      'INSTRUMENTATION_STATUS: test=testMethod',
+      'INSTRUMENTATION_STATUS_CODE: 0',
+    ]
+
+    parser = instrumentation_parser.InstrumentationParser(raw_output)
+    statuses = list(parser.IterStatus())
+
+    expected = [
+      (0, {
+        'foo': '1',
+        'bar': 'hello',
+        'world': 'false',
+        'class': 'this.is.a.test.package.TestClass',
+        'test': 'testMethod',
+      })
+    ]
+    self.assertEqual(expected, statuses)
+
+  def testInstrumentationParser_multiStatus(self):
+    raw_output = [
+      'INSTRUMENTATION_STATUS: class=foo',
+      'INSTRUMENTATION_STATUS: test=bar',
+      'INSTRUMENTATION_STATUS_CODE: 1',
+      'INSTRUMENTATION_STATUS: test_skipped=true',
+      'INSTRUMENTATION_STATUS_CODE: 0',
+      'INSTRUMENTATION_STATUS: class=hello',
+      'INSTRUMENTATION_STATUS: test=world',
+      'INSTRUMENTATION_STATUS: stack=',
+      'foo/bar.py (27)',
+      'hello/world.py (42)',
+      'test/file.py (1)',
+      'INSTRUMENTATION_STATUS_CODE: -1',
+    ]
+
+    parser = instrumentation_parser.InstrumentationParser(raw_output)
+    statuses = list(parser.IterStatus())
+
+    expected = [
+      (1, {'class': 'foo', 'test': 'bar',}),
+      (0, {'test_skipped': 'true'}),
+      (-1, {
+        'class': 'hello',
+        'test': 'world',
+        'stack': '\nfoo/bar.py (27)\nhello/world.py (42)\ntest/file.py (1)',
+      }),
+    ]
+    self.assertEqual(expected, statuses)
+
+  def testInstrumentationParser_statusResultAndCode(self):
+    raw_output = [
+      'INSTRUMENTATION_STATUS: class=foo',
+      'INSTRUMENTATION_STATUS: test=bar',
+      'INSTRUMENTATION_STATUS_CODE: 1',
+      'INSTRUMENTATION_RESULT: result=hello',
+      'world',
+      '',
+      '',
+      'INSTRUMENTATION_CODE: 0',
+    ]
+
+    parser = instrumentation_parser.InstrumentationParser(raw_output)
+    statuses = list(parser.IterStatus())
+    code, bundle = parser.GetResult()
+
+    self.assertEqual(0, code)
+    self.assertEqual({'result': 'hello\nworld\n\n'}, bundle)
+    self.assertEqual([(1, {'class': 'foo', 'test': 'bar'})], statuses)
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
diff --git a/build/android/pylib/instrumentation/instrumentation_test_instance.py b/build/android/pylib/instrumentation/instrumentation_test_instance.py
new file mode 100644
index 0000000..610f084
--- /dev/null
+++ b/build/android/pylib/instrumentation/instrumentation_test_instance.py
@@ -0,0 +1,665 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import copy
+import logging
+import os
+import pickle
+import re
+
+from devil.android import apk_helper
+from devil.android import md5sum
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.base import test_instance
+from pylib.constants import host_paths
+from pylib.instrumentation import test_result
+from pylib.instrumentation import instrumentation_parser
+from pylib.utils import isolator
+from pylib.utils import proguard
+
+with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
+  import unittest_util # pylint: disable=import-error
+
+# Ref: http://developer.android.com/reference/android/app/Activity.html
+_ACTIVITY_RESULT_CANCELED = 0
+_ACTIVITY_RESULT_OK = -1
+
+_COMMAND_LINE_PARAMETER = 'cmdlinearg-parameter'
+_DEFAULT_ANNOTATIONS = [
+    'Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
+    'EnormousTest', 'IntegrationTest']
+_EXCLUDE_UNLESS_REQUESTED_ANNOTATIONS = [
+    'DisabledTest', 'FlakyTest']
+_VALID_ANNOTATIONS = set(['Manual', 'PerfTest'] + _DEFAULT_ANNOTATIONS +
+                         _EXCLUDE_UNLESS_REQUESTED_ANNOTATIONS)
+_EXTRA_DRIVER_TEST_LIST = (
+    'org.chromium.test.driver.OnDeviceInstrumentationDriver.TestList')
+_EXTRA_DRIVER_TEST_LIST_FILE = (
+    'org.chromium.test.driver.OnDeviceInstrumentationDriver.TestListFile')
+_EXTRA_DRIVER_TARGET_PACKAGE = (
+    'org.chromium.test.driver.OnDeviceInstrumentationDriver.TargetPackage')
+_EXTRA_DRIVER_TARGET_CLASS = (
+    'org.chromium.test.driver.OnDeviceInstrumentationDriver.TargetClass')
+_EXTRA_TIMEOUT_SCALE = (
+    'org.chromium.test.driver.OnDeviceInstrumentationDriver.TimeoutScale')
+
+_PARAMETERIZED_TEST_ANNOTATION = 'ParameterizedTest'
+_PARAMETERIZED_TEST_SET_ANNOTATION = 'ParameterizedTest$Set'
+_NATIVE_CRASH_RE = re.compile('native crash', re.IGNORECASE)
+_PICKLE_FORMAT_VERSION = 10
+
+
+class MissingSizeAnnotationError(Exception):
+  def __init__(self, class_name):
+    super(MissingSizeAnnotationError, self).__init__(class_name +
+        ': Test method is missing required size annotation. Add one of: ' +
+        ', '.join('@' + a for a in _VALID_ANNOTATIONS))
+
+
+# TODO(jbudorick): Make these private class methods of
+# InstrumentationTestInstance once the instrumentation test_runner is
+# deprecated.
+def ParseAmInstrumentRawOutput(raw_output):
+  """Parses the output of an |am instrument -r| call.
+
+  Args:
+    raw_output: the output of an |am instrument -r| call as a list of lines
+  Returns:
+    A 3-tuple containing:
+      - the instrumentation code as an integer
+      - the instrumentation result as a list of lines
+      - the instrumentation statuses received as a list of 2-tuples
+        containing:
+        - the status code as an integer
+        - the bundle dump as a dict mapping string keys to a list of
+          strings, one for each line.
+  """
+  parser = instrumentation_parser.InstrumentationParser(raw_output)
+  statuses = list(parser.IterStatus())
+  code, bundle = parser.GetResult()
+  return (code, bundle, statuses)
+
+
+def GenerateTestResults(
+    result_code, result_bundle, statuses, start_ms, duration_ms):
+  """Generate test results from |statuses|.
+
+  Args:
+    result_code: The overall status code as an integer.
+    result_bundle: The summary bundle dump as a dict.
+    statuses: A list of 2-tuples containing:
+      - the status code as an integer
+      - the bundle dump as a dict mapping string keys to string values
+      Note that this is the same as the third item in the 3-tuple returned by
+      |ParseAmInstrumentRawOutput|.
+    start_ms: The start time of the test in milliseconds.
+    duration_ms: The duration of the test in milliseconds.
+
+  Returns:
+    A list containing an instance of InstrumentationTestResult for each test
+    parsed.
+  """
+
+  results = []
+
+  current_result = None
+
+  for status_code, bundle in statuses:
+    test_class = bundle.get('class', '')
+    test_method = bundle.get('test', '')
+    if test_class and test_method:
+      test_name = '%s#%s' % (test_class, test_method)
+    else:
+      continue
+
+    if status_code == instrumentation_parser.STATUS_CODE_START:
+      if current_result:
+        results.append(current_result)
+      current_result = test_result.InstrumentationTestResult(
+          test_name, base_test_result.ResultType.UNKNOWN, start_ms, duration_ms)
+    else:
+      if status_code == instrumentation_parser.STATUS_CODE_OK:
+        if bundle.get('test_skipped', '').lower() in ('true', '1', 'yes'):
+          current_result.SetType(base_test_result.ResultType.SKIP)
+        elif current_result.GetType() == base_test_result.ResultType.UNKNOWN:
+          current_result.SetType(base_test_result.ResultType.PASS)
+      else:
+        if status_code not in (instrumentation_parser.STATUS_CODE_ERROR,
+                               instrumentation_parser.STATUS_CODE_FAILURE):
+          logging.error('Unrecognized status code %d. Handling as an error.',
+                        status_code)
+        current_result.SetType(base_test_result.ResultType.FAIL)
+        if 'stack' in bundle:
+          current_result.SetLog(bundle['stack'])
+
+  if current_result:
+    if current_result.GetType() == base_test_result.ResultType.UNKNOWN:
+      crashed = (result_code == _ACTIVITY_RESULT_CANCELED
+                 and any(_NATIVE_CRASH_RE.search(l)
+                         for l in result_bundle.itervalues()))
+      if crashed:
+        current_result.SetType(base_test_result.ResultType.CRASH)
+
+    results.append(current_result)
+
+  return results
+
+
+def ParseCommandLineFlagParameters(annotations):
+  """Determines whether the test is parameterized to be run with different
+     command-line flags.
+
+  Args:
+    annotations: The annotations of the test.
+
+  Returns:
+    If the test is parameterized, returns a list of named tuples
+    with lists of flags, e.g.:
+
+      [(add=['--flag-to-add']), (remove=['--flag-to-remove']), ()]
+
+    This means the test must be run three times: first with "--flag-to-add"
+    added to the command line, then with "--flag-to-remove" removed from the
+    command line, and finally with the default command-line args. If the same
+    flag is listed both for adding and for removing, it is left unchanged.
+
+    If the test is not parameterized, returns None.
+  """
+  ParamsTuple = collections.namedtuple('ParamsTuple', ['add', 'remove'])
+  parameterized_tests = []
+  if _PARAMETERIZED_TEST_SET_ANNOTATION in annotations:
+    if annotations[_PARAMETERIZED_TEST_SET_ANNOTATION]:
+      parameterized_tests = annotations[
+        _PARAMETERIZED_TEST_SET_ANNOTATION].get('tests', [])
+  elif _PARAMETERIZED_TEST_ANNOTATION in annotations:
+    parameterized_tests = [annotations[_PARAMETERIZED_TEST_ANNOTATION]]
+  else:
+    return None
+
+  result = []
+  for pt in parameterized_tests:
+    if not pt:
+      continue
+    for p in pt['parameters']:
+      if p['tag'] == _COMMAND_LINE_PARAMETER:
+        to_add = []
+        to_remove = []
+        for a in p.get('arguments', []):
+          if a['name'] == 'add':
+            to_add = ['--%s' % f for f in a['stringArray']]
+          elif a['name'] == 'remove':
+            to_remove = ['--%s' % f for f in a['stringArray']]
+        result.append(ParamsTuple(to_add, to_remove))
+  return result if result else None
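+# An illustrative annotation value accepted above (structure inferred from the
+# parsing logic; the flag name is hypothetical):
+#   {'ParameterizedTest': {'parameters': [
+#       {'tag': 'cmdlinearg-parameter',
+#        'arguments': [{'name': 'add', 'stringArray': ['enable-foo']}]}]}}
+# yields [ParamsTuple(add=['--enable-foo'], remove=[])].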
+
+
+class InstrumentationTestInstance(test_instance.TestInstance):
+
+  def __init__(self, args, isolate_delegate, error_func):
+    super(InstrumentationTestInstance, self).__init__()
+
+    self._additional_apks = []
+    self._apk_under_test = None
+    self._apk_under_test_incremental_install_script = None
+    self._package_info = None
+    self._suite = None
+    self._test_apk = None
+    self._test_apk_incremental_install_script = None
+    self._test_jar = None
+    self._test_package = None
+    self._test_runner = None
+    self._test_support_apk = None
+    self._initializeApkAttributes(args, error_func)
+
+    self._data_deps = None
+    self._isolate_abs_path = None
+    self._isolate_delegate = None
+    self._isolated_abs_path = None
+    self._test_data = None
+    self._initializeDataDependencyAttributes(args, isolate_delegate)
+
+    self._annotations = None
+    self._excluded_annotations = None
+    self._test_filter = None
+    self._initializeTestFilterAttributes(args)
+
+    self._flags = None
+    self._initializeFlagAttributes(args)
+
+    self._driver_apk = None
+    self._driver_package = None
+    self._driver_name = None
+    self._initializeDriverAttributes()
+
+    self._timeout_scale = None
+    self._initializeTestControlAttributes(args)
+
+  def _initializeApkAttributes(self, args, error_func):
+    if args.apk_under_test:
+      apk_under_test_path = args.apk_under_test
+      if not args.apk_under_test.endswith('.apk'):
+        apk_under_test_path = os.path.join(
+            constants.GetOutDirectory(), constants.SDK_BUILD_APKS_DIR,
+            '%s.apk' % args.apk_under_test)
+
+      if not os.path.exists(apk_under_test_path):
+        error_func('Unable to find APK under test: %s' % apk_under_test_path)
+
+      self._apk_under_test = apk_helper.ToHelper(apk_under_test_path)
+
+    if args.test_apk.endswith('.apk'):
+      self._suite = os.path.splitext(os.path.basename(args.test_apk))[0]
+      self._test_apk = apk_helper.ToHelper(args.test_apk)
+    else:
+      self._suite = args.test_apk
+      self._test_apk = apk_helper.ToHelper(os.path.join(
+          constants.GetOutDirectory(), constants.SDK_BUILD_APKS_DIR,
+          '%s.apk' % args.test_apk))
+
+    self._apk_under_test_incremental_install_script = (
+        args.apk_under_test_incremental_install_script)
+    self._test_apk_incremental_install_script = (
+        args.test_apk_incremental_install_script)
+
+    if self._test_apk_incremental_install_script:
+      assert self._suite.endswith('_incremental')
+      self._suite = self._suite[:-len('_incremental')]
+
+    self._test_jar = os.path.join(
+        constants.GetOutDirectory(), constants.SDK_BUILD_TEST_JAVALIB_DIR,
+        '%s.jar' % self._suite)
+    self._test_support_apk = apk_helper.ToHelper(os.path.join(
+        constants.GetOutDirectory(), constants.SDK_BUILD_TEST_JAVALIB_DIR,
+        '%sSupport.apk' % self._suite))
+
+    if not os.path.exists(self._test_apk.path):
+      error_func('Unable to find test APK: %s' % self._test_apk.path)
+    if not os.path.exists(self._test_jar):
+      error_func('Unable to find test JAR: %s' % self._test_jar)
+
+    self._test_package = self._test_apk.GetPackageName()
+    self._test_runner = self._test_apk.GetInstrumentationName()
+
+    self._package_info = None
+    if self._apk_under_test:
+      package_under_test = self._apk_under_test.GetPackageName()
+      for package_info in constants.PACKAGE_INFO.itervalues():
+        if package_under_test == package_info.package:
+          self._package_info = package_info
+    if not self._package_info:
+      logging.warning('Unable to find package info for %s', self._test_package)
+
+    for apk in args.additional_apks:
+      if not os.path.exists(apk):
+        error_func('Unable to find additional APK: %s' % apk)
+    self._additional_apks = (
+        [apk_helper.ToHelper(x) for x in args.additional_apks])
+
+  def _initializeDataDependencyAttributes(self, args, isolate_delegate):
+    self._data_deps = []
+    if (args.isolate_file_path and
+        not isolator.IsIsolateEmpty(args.isolate_file_path)):
+      if os.path.isabs(args.isolate_file_path):
+        self._isolate_abs_path = args.isolate_file_path
+      else:
+        self._isolate_abs_path = os.path.join(
+            constants.DIR_SOURCE_ROOT, args.isolate_file_path)
+      self._isolate_delegate = isolate_delegate
+      self._isolated_abs_path = os.path.join(
+          constants.GetOutDirectory(), '%s.isolated' % self._test_package)
+    else:
+      self._isolate_delegate = None
+
+    # TODO(jbudorick): Deprecate and remove --test-data once data dependencies
+    # are fully converted to isolate.
+    if args.test_data:
+      logging.info('Data dependencies specified via --test-data')
+      self._test_data = args.test_data
+    else:
+      self._test_data = None
+
+    if not self._isolate_delegate and not self._test_data:
+      logging.warning('No data dependencies will be pushed.')
+
+  def _initializeTestFilterAttributes(self, args):
+    if args.test_filter:
+      self._test_filter = args.test_filter.replace('#', '.')
+
+    def annotation_dict_element(a):
+      a = a.split('=')
+      return (a[0], a[1] if len(a) == 2 else None)
+
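+    # e.g. an annotation string of 'Smoke,Feature=Sync' (values illustrative)
+    # parses to {'Smoke': None, 'Feature': 'Sync'}.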
+    if args.annotation_str:
+      self._annotations = dict(
+          annotation_dict_element(a)
+          for a in args.annotation_str.split(','))
+    elif not self._test_filter:
+      self._annotations = dict(
+          annotation_dict_element(a)
+          for a in _DEFAULT_ANNOTATIONS)
+    else:
+      self._annotations = {}
+
+    if args.exclude_annotation_str:
+      self._excluded_annotations = dict(
+          annotation_dict_element(a)
+          for a in args.exclude_annotation_str.split(','))
+    else:
+      self._excluded_annotations = {}
+
+    self._excluded_annotations.update(
+        {
+          a: None for a in _EXCLUDE_UNLESS_REQUESTED_ANNOTATIONS
+          if a not in self._annotations
+        })
+
+  def _initializeFlagAttributes(self, args):
+    self._flags = ['--enable-test-intents']
+    # TODO(jbudorick): Transition "--device-flags" to "--device-flags-file"
+    if hasattr(args, 'device_flags') and args.device_flags:
+      with open(args.device_flags) as device_flags_file:
+        stripped_lines = (l.strip() for l in device_flags_file)
+        self._flags.extend([flag for flag in stripped_lines if flag])
+    if hasattr(args, 'device_flags_file') and args.device_flags_file:
+      with open(args.device_flags_file) as device_flags_file:
+        stripped_lines = (l.strip() for l in device_flags_file)
+        self._flags.extend([flag for flag in stripped_lines if flag])
+    if (hasattr(args, 'strict_mode') and
+        args.strict_mode and
+        args.strict_mode != 'off'):
+      self._flags.append('--strict-mode=' + args.strict_mode)
+
+  def _initializeDriverAttributes(self):
+    self._driver_apk = os.path.join(
+        constants.GetOutDirectory(), constants.SDK_BUILD_APKS_DIR,
+        'OnDeviceInstrumentationDriver.apk')
+    if os.path.exists(self._driver_apk):
+      driver_apk = apk_helper.ApkHelper(self._driver_apk)
+      self._driver_package = driver_apk.GetPackageName()
+      self._driver_name = driver_apk.GetInstrumentationName()
+    else:
+      self._driver_apk = None
+
+  def _initializeTestControlAttributes(self, args):
+    self._timeout_scale = args.timeout_scale or 1
+
+  @property
+  def additional_apks(self):
+    return self._additional_apks
+
+  @property
+  def apk_under_test(self):
+    return self._apk_under_test
+
+  @property
+  def apk_under_test_incremental_install_script(self):
+    return self._apk_under_test_incremental_install_script
+
+  @property
+  def flags(self):
+    return self._flags
+
+  @property
+  def driver_apk(self):
+    return self._driver_apk
+
+  @property
+  def driver_package(self):
+    return self._driver_package
+
+  @property
+  def driver_name(self):
+    return self._driver_name
+
+  @property
+  def package_info(self):
+    return self._package_info
+
+  @property
+  def suite(self):
+    return self._suite
+
+  @property
+  def test_apk(self):
+    return self._test_apk
+
+  @property
+  def test_apk_incremental_install_script(self):
+    return self._test_apk_incremental_install_script
+
+  @property
+  def test_jar(self):
+    return self._test_jar
+
+  @property
+  def test_support_apk(self):
+    return self._test_support_apk
+
+  @property
+  def test_package(self):
+    return self._test_package
+
+  @property
+  def test_runner(self):
+    return self._test_runner
+
+  @property
+  def timeout_scale(self):
+    return self._timeout_scale
+
+  #override
+  def TestType(self):
+    return 'instrumentation'
+
+  #override
+  def SetUp(self):
+    if self._isolate_delegate:
+      self._isolate_delegate.Remap(
+          self._isolate_abs_path, self._isolated_abs_path)
+      self._isolate_delegate.MoveOutputDeps()
+      self._data_deps.extend([(self._isolate_delegate.isolate_deps_dir, None)])
+
+    # TODO(jbudorick): Convert existing tests that depend on the --test-data
+    # mechanism to isolate, then remove this.
+    if self._test_data:
+      for t in self._test_data:
+        device_rel_path, host_rel_path = t.split(':')
+        host_abs_path = os.path.join(host_paths.DIR_SOURCE_ROOT, host_rel_path)
+        self._data_deps.extend(
+            [(host_abs_path,
+              [None, 'chrome', 'test', 'data', device_rel_path])])
+
+  def GetDataDependencies(self):
+    return self._data_deps
+
+  def GetTests(self):
+    pickle_path = '%s-proguard.pickle' % self.test_jar
+    try:
+      tests = self._GetTestsFromPickle(pickle_path, self.test_jar)
+    except self.ProguardPickleException as e:
+      logging.info('Getting tests from JAR via proguard. (%s)', str(e))
+      tests = self._GetTestsFromProguard(self.test_jar)
+      self._SaveTestsToPickle(pickle_path, self.test_jar, tests)
+    return self._ParametrizeTestsWithFlags(
+        self._InflateTests(self._FilterTests(tests)))
+
+  class ProguardPickleException(Exception):
+    pass
+
+  def _GetTestsFromPickle(self, pickle_path, jar_path):
+    if not os.path.exists(pickle_path):
+      raise self.ProguardPickleException('%s does not exist.' % pickle_path)
+    if os.path.getmtime(pickle_path) <= os.path.getmtime(jar_path):
+      raise self.ProguardPickleException(
+          '%s newer than %s.' % (jar_path, pickle_path))
+
+    with open(pickle_path, 'r') as pickle_file:
+      pickle_data = pickle.load(pickle_file)
+    jar_md5 = md5sum.CalculateHostMd5Sums(jar_path)[jar_path]
+
+    try:
+      if pickle_data['VERSION'] != _PICKLE_FORMAT_VERSION:
+        raise self.ProguardPickleException('PICKLE_FORMAT_VERSION has changed.')
+      if pickle_data['JAR_MD5SUM'] != jar_md5:
+        raise self.ProguardPickleException('JAR file MD5 sum differs.')
+      return pickle_data['TEST_METHODS']
+    except TypeError as e:
+      logging.error(pickle_data)
+      raise self.ProguardPickleException(str(e))
+
+  # pylint: disable=no-self-use
+  def _GetTestsFromProguard(self, jar_path):
+    p = proguard.Dump(jar_path)
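+    # proguard.Dump is assumed to return a structure shaped roughly like
+    # (illustrative):
+    #   {'classes': [{'class': 'org.FooTest',
+    #                 'superclass': '...',
+    #                 'annotations': {...},
+    #                 'methods': [{'method': 'testBar',
+    #                              'annotations': {...}}, ...]}, ...]}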
+
+    def is_test_class(c):
+      return c['class'].endswith('Test')
+
+    def is_test_method(m):
+      return m['method'].startswith('test')
+
+    class_lookup = dict((c['class'], c) for c in p['classes'])
+    def recursive_get_class_annotations(c):
+      s = c['superclass']
+      if s in class_lookup:
+        a = recursive_get_class_annotations(class_lookup[s])
+      else:
+        a = {}
+      a.update(c['annotations'])
+      return a
+
+    def stripped_test_class(c):
+      return {
+        'class': c['class'],
+        'annotations': recursive_get_class_annotations(c),
+        'methods': [m for m in c['methods'] if is_test_method(m)],
+      }
+
+    return [stripped_test_class(c) for c in p['classes']
+            if is_test_class(c)]
+
+  def _SaveTestsToPickle(self, pickle_path, jar_path, tests):
+    jar_md5 = md5sum.CalculateHostMd5Sums(jar_path)[jar_path]
+    pickle_data = {
+      'VERSION': _PICKLE_FORMAT_VERSION,
+      'JAR_MD5SUM': jar_md5,
+      'TEST_METHODS': tests,
+    }
+    with open(pickle_path, 'w') as pickle_file:
+      pickle.dump(pickle_data, pickle_file)
+
+  def _FilterTests(self, tests):
+
+    def gtest_filter(c, m):
+      if not self._test_filter:
+        return True
+      # Allow fully-qualified name as well as an omitted package.
+      names = ['%s.%s' % (c['class'], m['method']),
+               '%s.%s' % (c['class'].split('.')[-1], m['method'])]
+      return unittest_util.FilterTestNames(names, self._test_filter)
+
+    def annotation_filter(all_annotations):
+      if not self._annotations:
+        return True
+      return any_annotation_matches(self._annotations, all_annotations)
+
+    def excluded_annotation_filter(all_annotations):
+      if not self._excluded_annotations:
+        return True
+      return not any_annotation_matches(self._excluded_annotations,
+                                        all_annotations)
+
+    def any_annotation_matches(annotations, all_annotations):
+      return any(
+          ak in all_annotations and (av is None or av == all_annotations[ak])
+          for ak, av in annotations.iteritems())
+
+    filtered_classes = []
+    for c in tests:
+      filtered_methods = []
+      for m in c['methods']:
+        # Gtest filtering
+        if not gtest_filter(c, m):
+          continue
+
+        all_annotations = dict(c['annotations'])
+        all_annotations.update(m['annotations'])
+
+        # Enforce that all tests declare their size.
+        if not any(a in _VALID_ANNOTATIONS for a in all_annotations):
+          raise MissingSizeAnnotationError('%s.%s' % (c['class'], m['method']))
+
+        if (not annotation_filter(all_annotations)
+            or not excluded_annotation_filter(all_annotations)):
+          continue
+
+        filtered_methods.append(m)
+
+      if filtered_methods:
+        filtered_class = dict(c)
+        filtered_class['methods'] = filtered_methods
+        filtered_classes.append(filtered_class)
+
+    return filtered_classes
+
+  def _InflateTests(self, tests):
+    inflated_tests = []
+    for c in tests:
+      for m in c['methods']:
+        a = dict(c['annotations'])
+        a.update(m['annotations'])
+        inflated_tests.append({
+            'class': c['class'],
+            'method': m['method'],
+            'annotations': a,
+        })
+    return inflated_tests
+
+  def _ParametrizeTestsWithFlags(self, tests):
+    new_tests = []
+    for t in tests:
+      parameters = ParseCommandLineFlagParameters(t['annotations'])
+      if parameters:
+        t['flags'] = parameters[0]
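+        # The first parameter set is applied to the original test dict in
+        # place; each remaining set below gets a shallow copy of the test.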
+        for p in parameters[1:]:
+          parameterized_t = copy.copy(t)
+          parameterized_t['flags'] = p
+          new_tests.append(parameterized_t)
+    return tests + new_tests
+
+  def GetDriverEnvironmentVars(
+      self, test_list=None, test_list_file_path=None):
+    env = {
+      _EXTRA_DRIVER_TARGET_PACKAGE: self.test_package,
+      _EXTRA_DRIVER_TARGET_CLASS: self.test_runner,
+      _EXTRA_TIMEOUT_SCALE: self._timeout_scale,
+    }
+
+    if test_list:
+      env[_EXTRA_DRIVER_TEST_LIST] = ','.join(test_list)
+
+    if test_list_file_path:
+      env[_EXTRA_DRIVER_TEST_LIST_FILE] = (
+          os.path.basename(test_list_file_path))
+
+    return env
+
+  @staticmethod
+  def ParseAmInstrumentRawOutput(raw_output):
+    return ParseAmInstrumentRawOutput(raw_output)
+
+  @staticmethod
+  def GenerateTestResults(
+      result_code, result_bundle, statuses, start_ms, duration_ms):
+    return GenerateTestResults(result_code, result_bundle, statuses,
+                               start_ms, duration_ms)
+
+  #override
+  def TearDown(self):
+    if self._isolate_delegate:
+      self._isolate_delegate.Clear()
+
diff --git a/build/android/pylib/instrumentation/instrumentation_test_instance_test.py b/build/android/pylib/instrumentation/instrumentation_test_instance_test.py
new file mode 100755
index 0000000..3837f1f
--- /dev/null
+++ b/build/android/pylib/instrumentation/instrumentation_test_instance_test.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""Unit tests for instrumentation.TestRunner."""
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.constants import host_paths
+from pylib.instrumentation import instrumentation_test_instance
+
+with host_paths.SysPath(host_paths.PYMOCK_PATH):
+  import mock  # pylint: disable=import-error
+
+
+class InstrumentationTestInstanceTest(unittest.TestCase):
+
+  def setUp(self):
+    options = mock.Mock()
+    options.tool = ''
+
+  def testGenerateTestResults_noStatus(self):
+    results = instrumentation_test_instance.GenerateTestResults(
+        None, None, [], 0, 1000)
+    self.assertEqual([], results)
+
+  def testGenerateTestResults_testPassed(self):
+    statuses = [
+      (1, {
+        'class': 'test.package.TestClass',
+        'test': 'testMethod',
+      }),
+      (0, {
+        'class': 'test.package.TestClass',
+        'test': 'testMethod',
+      }),
+    ]
+    results = instrumentation_test_instance.GenerateTestResults(
+        None, None, statuses, 0, 1000)
+    self.assertEqual(1, len(results))
+    self.assertEqual(base_test_result.ResultType.PASS, results[0].GetType())
+
+  def testGenerateTestResults_testSkipped_true(self):
+    statuses = [
+      (1, {
+        'class': 'test.package.TestClass',
+        'test': 'testMethod',
+      }),
+      (0, {
+        'test_skipped': 'true',
+        'class': 'test.package.TestClass',
+        'test': 'testMethod',
+      }),
+      (0, {
+        'class': 'test.package.TestClass',
+        'test': 'testMethod',
+      }),
+    ]
+    results = instrumentation_test_instance.GenerateTestResults(
+        None, None, statuses, 0, 1000)
+    self.assertEqual(1, len(results))
+    self.assertEqual(base_test_result.ResultType.SKIP, results[0].GetType())
+
+  def testGenerateTestResults_testSkipped_false(self):
+    statuses = [
+      (1, {
+        'class': 'test.package.TestClass',
+        'test': 'testMethod',
+      }),
+      (0, {
+        'test_skipped': 'false',
+      }),
+      (0, {
+        'class': 'test.package.TestClass',
+        'test': 'testMethod',
+      }),
+    ]
+    results = instrumentation_test_instance.GenerateTestResults(
+        None, None, statuses, 0, 1000)
+    self.assertEqual(1, len(results))
+    self.assertEqual(base_test_result.ResultType.PASS, results[0].GetType())
+
+  def testGenerateTestResults_testFailed(self):
+    statuses = [
+      (1, {
+        'class': 'test.package.TestClass',
+        'test': 'testMethod',
+      }),
+      (-2, {
+        'class': 'test.package.TestClass',
+        'test': 'testMethod',
+      }),
+    ]
+    results = instrumentation_test_instance.GenerateTestResults(
+        None, None, statuses, 0, 1000)
+    self.assertEqual(1, len(results))
+    self.assertEqual(base_test_result.ResultType.FAIL, results[0].GetType())
+
+  def testGenerateTestResults_testUnknownException(self):
+    stacktrace = 'long\nstacktrace'
+    statuses = [
+      (1, {
+        'class': 'test.package.TestClass',
+        'test': 'testMethod',
+      }),
+      (-1, {
+        'class': 'test.package.TestClass',
+        'test': 'testMethod',
+        'stack': stacktrace,
+      }),
+    ]
+    results = instrumentation_test_instance.GenerateTestResults(
+        None, None, statuses, 0, 1000)
+    self.assertEqual(1, len(results))
+    self.assertEqual(base_test_result.ResultType.FAIL, results[0].GetType())
+    self.assertEqual(stacktrace, results[0].GetLog())
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
diff --git a/build/android/pylib/instrumentation/json_perf_parser.py b/build/android/pylib/instrumentation/json_perf_parser.py
new file mode 100644
index 0000000..c647890
--- /dev/null
+++ b/build/android/pylib/instrumentation/json_perf_parser.py
@@ -0,0 +1,161 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""A helper module for parsing JSON objects from perf tests results."""
+
+import json
+
+
+def GetAverageRunInfo(json_data, name):
+  """Summarizes TraceEvent JSON data for performance metrics.
+
+  Example JSON Inputs (More tags can be added but these are required):
+  Measuring Duration:
+  [
+    { "cat": "Java",
+      "ts": 10000000000,
+      "ph": "S",
+      "name": "TestTrace"
+    },
+    { "cat": "Java",
+      "ts": 10000004000,
+      "ph": "F",
+      "name": "TestTrace"
+    },
+    ...
+  ]
+
+  Measuring Call Frequency (FPS):
+  [
+    { "cat": "Java",
+      "ts": 10000000000,
+      "ph": "I",
+      "name": "TestTraceFPS"
+    },
+    { "cat": "Java",
+      "ts": 10000004000,
+      "ph": "I",
+      "name": "TestTraceFPS"
+    },
+    ...
+  ]
+
+  Args:
+    json_data: A list of dictionaries, each representing a JSON object.
+    name: The 'name' tag to filter on in the JSON file.
+
+  Returns:
+    A dictionary of result data with the following tags:
+      min: The minimum value tracked.
+      max: The maximum value tracked.
+      average: The average of all the values tracked.
+      count: The number of times the category/name pair was tracked.
+      type: The type of tracking ('Instant' for instant tags and 'Span' for
+            begin/end tags).
+      category: The passed in category filter.
+      name: The passed in name filter.
+      data_points: A list of all of the times used to generate this data.
+      units: The units for the values being reported.
+
+  Raises:
+    Exception: if entry contains invalid data.
+  """
+
+  def EntryFilter(entry):
+    return entry['cat'] == 'Java' and entry['name'] == name
+  filtered_entries = [j for j in json_data if EntryFilter(j)]
+
+  result = {
+      'min': -1,
+      'max': -1,
+      'average': 0,
+      'count': 0,
+      'type': 'Unknown',
+      'category': 'Java',
+      'name': name,
+      'data_points': [],
+      'units': '',
+  }
+
+  total_sum = 0
+
+  last_val = 0
+  val_type = None
+  for entry in filtered_entries:
+    if not val_type:
+      if 'mem' in entry:
+        val_type = 'mem'
+
+        def GetVal(entry):
+          return entry['mem']
+
+        result['units'] = 'kb'
+      elif 'ts' in entry:
+        val_type = 'ts'
+
+        def GetVal(entry):
+          return float(entry['ts']) / 1000.0
+
+        result['units'] = 'ms'
+      else:
+        raise Exception('Entry did not contain valid value info: %s' % entry)
+
+    if val_type not in entry:
+      raise Exception('Entry did not contain expected value type "%s" '
+                      'information: %s' % (val_type, entry))
+    val = GetVal(entry)
+    if (entry['ph'] == 'S' and
+        (result['type'] == 'Unknown' or result['type'] == 'Span')):
+      result['type'] = 'Span'
+      last_val = val
+    elif ((entry['ph'] == 'F' and result['type'] == 'Span') or
+          (entry['ph'] == 'I' and (result['type'] == 'Unknown' or
+                                   result['type'] == 'Instant'))):
+      if last_val > 0:
+        delta = val - last_val
+        if result['min'] == -1 or result['min'] > delta:
+          result['min'] = delta
+        if result['max'] == -1 or result['max'] < delta:
+          result['max'] = delta
+        total_sum += delta
+        result['count'] += 1
+        result['data_points'].append(delta)
+      if entry['ph'] == 'I':
+        result['type'] = 'Instant'
+        last_val = val
+  if result['count'] > 0:
+    result['average'] = total_sum / result['count']
+
+  return result
+
+
+def GetAverageRunInfoFromJSONString(json_string, name):
+  """Returns the results from GetAverageRunInfo using a JSON string.
+
+  Args:
+    json_string: The string containing JSON.
+    name: The 'name' tag to filter on in the JSON file.
+
+  Returns:
+    See GetAverageRunInfo Returns section.
+  """
+  return GetAverageRunInfo(json.loads(json_string), name)
+
+
+def GetAverageRunInfoFromFile(json_file, name):
+  """Returns the results from GetAverageRunInfo using a JSON file.
+
+  Args:
+    json_file: The path to a JSON file.
+    name: The 'name' tag to filter on in the JSON file.
+
+  Returns:
+    See GetAverageRunInfo Returns section.
+  """
+  with open(json_file, 'r') as f:
+    perf = json.load(f)
+
+  return GetAverageRunInfo(perf, name)
diff --git a/build/android/pylib/instrumentation/test_result.py b/build/android/pylib/instrumentation/test_result.py
new file mode 100644
index 0000000..24e80a8
--- /dev/null
+++ b/build/android/pylib/instrumentation/test_result.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from pylib.base import base_test_result
+
+
+class InstrumentationTestResult(base_test_result.BaseTestResult):
+  """Result information for a single instrumentation test."""
+
+  def __init__(self, full_name, test_type, start_date, dur, log=''):
+    """Construct an InstrumentationTestResult object.
+
+    Args:
+      full_name: Full name of the test.
+      test_type: Type of the test result as defined in ResultType.
+      start_date: Date in milliseconds when the test began running.
+      dur: Duration of the test run in milliseconds.
+      log: A string listing any errors.
+    """
+    super(InstrumentationTestResult, self).__init__(
+        full_name, test_type, dur, log)
+    name_pieces = full_name.rsplit('#')
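+    # Full names are assumed to take the form 'org.chromium.FooTest#testBar'
+    # (illustrative), i.e. class name, '#', then method name.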
+    if len(name_pieces) > 1:
+      self._test_name = name_pieces[1]
+      self._class_name = name_pieces[0]
+    else:
+      self._class_name = full_name
+      self._test_name = full_name
+    self._start_date = start_date
diff --git a/build/android/pylib/junit/__init__.py b/build/android/pylib/junit/__init__.py
new file mode 100644
index 0000000..5cac026
--- /dev/null
+++ b/build/android/pylib/junit/__init__.py
@@ -0,0 +1,4 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
diff --git a/build/android/pylib/junit/setup.py b/build/android/pylib/junit/setup.py
new file mode 100644
index 0000000..94d4277
--- /dev/null
+++ b/build/android/pylib/junit/setup.py
@@ -0,0 +1,20 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from pylib.junit import test_runner
+
+def Setup(args):
+  """Creates a test runner factory for junit tests.
+
+  Args:
+    args: an argparse.Namespace object.
+  Returns:
+    A (runner_factory, tests) tuple.
+  """
+
+  def TestRunnerFactory(_unused_device, _unused_shard_index):
+    return test_runner.JavaTestRunner(args)
+
+  return (TestRunnerFactory, ['JUnit tests'])
+
diff --git a/build/android/pylib/junit/test_dispatcher.py b/build/android/pylib/junit/test_dispatcher.py
new file mode 100644
index 0000000..51253d4
--- /dev/null
+++ b/build/android/pylib/junit/test_dispatcher.py
@@ -0,0 +1,29 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from pylib import constants
+from pylib.base import base_test_result
+
+def RunTests(tests, runner_factory):
+  """Runs a set of java tests on the host.
+
+  Returns:
+    A tuple containing the results and the exit code.
+  """
+  def run(t):
+    runner = runner_factory(None, None)
+    runner.SetUp()
+    results_list, return_code = runner.RunTest(t)
+    runner.TearDown()
+    return (results_list, return_code == 0)
+
+  test_run_results = base_test_result.TestRunResults()
+  exit_code = 0
+  for t in tests:
+    results_list, passed = run(t)
+    test_run_results.AddResults(results_list)
+    if not passed:
+      exit_code = constants.ERROR_EXIT_CODE
+  return (test_run_results, exit_code)
+
diff --git a/build/android/pylib/junit/test_runner.py b/build/android/pylib/junit/test_runner.py
new file mode 100644
index 0000000..6238fe1
--- /dev/null
+++ b/build/android/pylib/junit/test_runner.py
@@ -0,0 +1,49 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import tempfile
+
+from devil.utils import cmd_helper
+from pylib import constants
+from pylib.results import json_results
+
+class JavaTestRunner(object):
+  """Runs java tests on the host."""
+
+  def __init__(self, args):
+    self._package_filter = args.package_filter
+    self._runner_filter = args.runner_filter
+    self._sdk_version = args.sdk_version
+    self._test_filter = args.test_filter
+    self._test_suite = args.test_suite
+
+  def SetUp(self):
+    pass
+
+  def RunTest(self, _test):
+    """Runs junit tests from |self._test_suite|."""
+    with tempfile.NamedTemporaryFile() as json_file:
+      java_script = os.path.join(
+          constants.GetOutDirectory(), 'bin', 'helper', self._test_suite)
+      command = [java_script,
+                 '-test-jars', self._test_suite + '.jar',
+                 '-json-results-file', json_file.name]
+      if self._test_filter:
+        command.extend(['-gtest-filter', self._test_filter])
+      if self._package_filter:
+        command.extend(['-package-filter', self._package_filter])
+      if self._runner_filter:
+        command.extend(['-runner-filter', self._runner_filter])
+      if self._sdk_version:
+        command.extend(['-sdk-version', self._sdk_version])
+      return_code = cmd_helper.RunCmd(command)
+      results_list = json_results.ParseResultsFromJson(
+          json.loads(json_file.read()))
+      return (results_list, return_code)
+
+  def TearDown(self):
+    pass
+
diff --git a/build/android/pylib/linker/__init__.py b/build/android/pylib/linker/__init__.py
new file mode 100644
index 0000000..af99437
--- /dev/null
+++ b/build/android/pylib/linker/__init__.py
@@ -0,0 +1,4 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
diff --git a/build/android/pylib/linker/setup.py b/build/android/pylib/linker/setup.py
new file mode 100644
index 0000000..3f380ea
--- /dev/null
+++ b/build/android/pylib/linker/setup.py
@@ -0,0 +1,60 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Setup for linker tests."""
+
+import logging
+
+from pylib.constants import host_paths
+from pylib.linker import test_case
+from pylib.linker import test_runner
+
+with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
+  import unittest_util # pylint: disable=import-error
+
+# ModernLinker requires Android M (API level 23) or later.
+_VERSION_SDK_PROPERTY = 'ro.build.version.sdk'
+_MODERN_LINKER_MINIMUM_SDK_INT = 23
+
+def Setup(args, devices):
+  """Creates a list of test cases and a runner factory.
+
+  Args:
+    args: an argparse.Namespace object.
+    devices: an iterable of available devices.
+  Returns:
+    A tuple of (TestRunnerFactory, tests).
+  """
+  legacy_linker_tests = [
+      test_case.LinkerSharedRelroTest(is_modern_linker=False,
+                                      is_low_memory=False),
+      test_case.LinkerSharedRelroTest(is_modern_linker=False,
+                                      is_low_memory=True),
+  ]
+  modern_linker_tests = [
+      test_case.LinkerSharedRelroTest(is_modern_linker=True),
+  ]
+
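+  # Start from a sentinel larger than any real SDK level, then take the
+  # minimum across all attached devices.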
+  min_sdk_int = 1 << 31
+  for device in devices:
+    min_sdk_int = min(min_sdk_int, device.build_version_sdk)
+
+  if min_sdk_int >= _MODERN_LINKER_MINIMUM_SDK_INT:
+    all_tests = legacy_linker_tests + modern_linker_tests
+  else:
+    all_tests = legacy_linker_tests
+    logging.warning('Not running ModernLinker tests '
+                    '(requires API %d, found %d)',
+                    _MODERN_LINKER_MINIMUM_SDK_INT, min_sdk_int)
+
+  if args.test_filter:
+    all_test_names = [test.qualified_name for test in all_tests]
+    filtered_test_names = unittest_util.FilterTestNames(all_test_names,
+                                                        args.test_filter)
+    all_tests = [t for t in all_tests
+                 if t.qualified_name in filtered_test_names]
+
+  def TestRunnerFactory(device, _shard_index):
+    return test_runner.LinkerTestRunner(device, args.tool)
+
+  return (TestRunnerFactory, all_tests)
diff --git a/build/android/pylib/linker/test_case.py b/build/android/pylib/linker/test_case.py
new file mode 100644
index 0000000..475b730
--- /dev/null
+++ b/build/android/pylib/linker/test_case.py
@@ -0,0 +1,227 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Base class for linker-specific test cases.
+
+   The custom dynamic linker can only be tested through a custom test case
+   for various technical reasons:
+
+     - It's an 'invisible feature', i.e. it doesn't expose a new API or
+       behaviour, all it does is save RAM when loading native libraries.
+
+     - Checking that it works correctly requires several things that do not
+       fit the existing GTest-based and instrumentation-based tests:
+
+         - Native test code needs to be run in both the browser and renderer
+           process at the same time just after loading native libraries, in
+           a completely asynchronous way.
+
+         - Each test case requires restarting a whole new application process
+           with a different command-line.
+
+         - Enabling test support in the Linker code requires building a special
+           APK with a flag to activate special test-only support code in the
+           Linker code itself.
+
+       Host-driven tests have also been tried, but since they're really
+       sub-classes of instrumentation tests, they didn't work well either.
+
+   To build and run the linker tests, do the following:
+
+     ninja -C out/Debug chromium_linker_test_apk
+     build/android/test_runner.py linker
+
+"""
+# pylint: disable=R0201
+
+import logging
+import re
+
+from devil.android import device_errors
+from devil.android.sdk import intent
+from pylib.base import base_test_result
+
+
+ResultType = base_test_result.ResultType
+
+_PACKAGE_NAME = 'org.chromium.chromium_linker_test_apk'
+_ACTIVITY_NAME = '.ChromiumLinkerTestActivity'
+_COMMAND_LINE_FILE = '/data/local/tmp/chromium-linker-test-command-line'
+
+# Logcat filters used during each test. Only the 'chromium' one is really
+# needed, but the logs are added to the TestResult in case of error, and
+# it is handy to have others as well when troubleshooting.
+_LOGCAT_FILTERS = ['*:s', 'chromium:v', 'cr_chromium:v',
+                   'cr_ChromiumAndroidLinker:v', 'cr_LibraryLoader:v',
+                   'cr_LinkerTest:v']
+#_LOGCAT_FILTERS = ['*:v']  ## DEBUG
+
+# Regular expression used to match status lines in logcat.
+_RE_BROWSER_STATUS_LINE = re.compile(r' BROWSER_LINKER_TEST: (FAIL|SUCCESS)$')
+_RE_RENDERER_STATUS_LINE = re.compile(r' RENDERER_LINKER_TEST: (FAIL|SUCCESS)$')
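+# For reference, a matching logcat line looks like this (illustrative):
+#   I/chromium( 1234): BROWSER_LINKER_TEST: SUCCESS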
+
+def _StartActivityAndWaitForLinkerTestStatus(device, timeout):
+  """Force-start an activity and wait up to |timeout| seconds until the full
+     linker test status lines appear in the logcat, recorded through |device|.
+  Args:
+    device: A DeviceUtils instance.
+    timeout: Timeout in seconds
+  Returns:
+    A (status, logs) tuple, where status is a ResultType constant and logs
+    is the final logcat output as a string.
+  """
+
+  # 1. Start recording logcat with appropriate filters.
+  with device.GetLogcatMonitor(filter_specs=_LOGCAT_FILTERS) as logmon:
+
+    # 2. Force-start activity.
+    device.StartActivity(
+        intent.Intent(package=_PACKAGE_NAME, activity=_ACTIVITY_NAME),
+        force_stop=True)
+
+    # 3. Wait up to |timeout| seconds until the test status is in the logcat.
+    result = ResultType.PASS
+    try:
+      browser_match = logmon.WaitFor(_RE_BROWSER_STATUS_LINE, timeout=timeout)
+      logging.debug('Found browser match: %s', browser_match.group(0))
+      renderer_match = logmon.WaitFor(_RE_RENDERER_STATUS_LINE,
+                                      timeout=timeout)
+      logging.debug('Found renderer match: %s', renderer_match.group(0))
+      if (browser_match.group(1) != 'SUCCESS'
+          or renderer_match.group(1) != 'SUCCESS'):
+        result = ResultType.FAIL
+    except device_errors.CommandTimeoutError:
+      result = ResultType.TIMEOUT
+
+    return result, '\n'.join(device.adb.Logcat(dump=True))
+
+
+class LibraryLoadMap(dict):
+  """A helper class to pretty-print a map of library names to load addresses."""
+  def __str__(self):
+    items = ['\'%s\': 0x%x' % (name, address)
+             for (name, address) in self.iteritems()]
+    return '{%s}' % (', '.join(items))
+
+  def __repr__(self):
+    return 'LibraryLoadMap(%s)' % self.__str__()
+
+
+class AddressList(list):
+  """A helper class to pretty-print a list of load addresses."""
+  def __str__(self):
+    items = ['0x%x' % address for address in self]
+    return '[%s]' % (', '.join(items))
+
+  def __repr__(self):
+    return 'AddressList(%s)' % self.__str__()
+
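+# Illustrative output of the helpers above (addresses are made up):
+#   >>> print LibraryLoadMap({'libfoo.so': 0x7f00a000})
+#   {'libfoo.so': 0x7f00a000}
+#   >>> print AddressList([0x7f00a000, 0x7f00b000])
+#   [0x7f00a000, 0x7f00b000]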
+
+class LinkerTestCaseBase(object):
+  """Base class for linker test cases."""
+
+  def __init__(self, is_modern_linker=False, is_low_memory=False):
+    """Create a test case.
+    Args:
+      is_modern_linker: True to test ModernLinker, False to test LegacyLinker.
+      is_low_memory: True to simulate a low-memory device, False otherwise.
+    """
+    self.is_modern_linker = is_modern_linker
+    if is_modern_linker:
+      test_suffix = 'ForModernLinker'
+    else:
+      test_suffix = 'ForLegacyLinker'
+    self.is_low_memory = is_low_memory
+    if is_low_memory:
+      test_suffix += 'LowMemoryDevice'
+    else:
+      test_suffix += 'RegularDevice'
+    class_name = self.__class__.__name__
+    self.qualified_name = '%s.%s' % (class_name, test_suffix)
+    self.tagged_name = self.qualified_name
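+    # For example (illustrative), is_modern_linker=False and
+    # is_low_memory=True yield the qualified name
+    # 'LinkerSharedRelroTest.ForLegacyLinkerLowMemoryDevice'.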
+
+  def _RunTest(self, _device):
+    """Run the test, must be overriden.
+    Args:
+      _device: A DeviceUtils interface.
+    Returns:
+      A (status, log) tuple, where <status> is a ResultType constant, and <log>
+      is the logcat output captured during the test in case of error, or None
+      in case of success.
+    """
+    return ResultType.FAIL, 'Unimplemented _RunTest() method!'
+
+  def Run(self, device):
+    """Run the test on a given device.
+    Args:
+      device: The DeviceUtils instance to run the test on.
+    Returns:
+      A base_test_result.TestRunResult() instance.
+    """
+    margin = 8
+    print '[ %-*s ] %s' % (margin, 'RUN', self.tagged_name)
+    logging.info('Running linker test: %s', self.tagged_name)
+
+    # Create command-line file on device.
+    if self.is_modern_linker:
+      command_line_flags = '--use-linker=modern'
+    else:
+      command_line_flags = '--use-linker=legacy'
+    if self.is_low_memory:
+      command_line_flags += ' --low-memory-device'
+    device.WriteFile(_COMMAND_LINE_FILE, command_line_flags)
+
+    # Run the test.
+    status, logs = self._RunTest(device)
+
+    result_text = 'OK'
+    if status == ResultType.FAIL:
+      result_text = 'FAILED'
+    elif status == ResultType.TIMEOUT:
+      result_text = 'TIMEOUT'
+    print '[ %*s ] %s' % (margin, result_text, self.tagged_name)
+
+    results = base_test_result.TestRunResults()
+    results.AddResult(
+        base_test_result.BaseTestResult(
+            self.tagged_name,
+            status,
+            log=logs))
+
+    return results
+
+  def __str__(self):
+    return self.tagged_name
+
+  def __repr__(self):
+    return self.tagged_name
+
+
+class LinkerSharedRelroTest(LinkerTestCaseBase):
+  """A linker test case to check the status of shared RELRO sections.
+
+    The core of the checks performed here is pretty simple:
+
+      - Clear the logcat and start recording with an appropriate set of filters.
+      - Create the command-line appropriate for the test-case.
+      - Start the activity (always forcing a cold start).
+      - Every second, look at the current content of the filtered logcat lines
+        and look for instances of the following:
+
+            BROWSER_LINKER_TEST: <status>
+            RENDERER_LINKER_TEST: <status>
+
+        where <status> can be either FAIL or SUCCESS. These lines can appear
+        in any order in the logcat. Once both browser and renderer status are
+        found, stop the loop. Otherwise timeout after 30 seconds.
+
+        Note that there can be other lines beginning with BROWSER_LINKER_TEST:
+        and RENDERER_LINKER_TEST:, but these are not followed by a <status>
+        code.
+
+      - The test case passes if the <status> for both the browser and the
+        renderer process is SUCCESS. Otherwise it is a failure.
+  """
+  def _RunTest(self, device):
+    # Wait up to 30 seconds until the linker test status is in the logcat.
+    return _StartActivityAndWaitForLinkerTestStatus(device, timeout=30)
diff --git a/build/android/pylib/linker/test_runner.py b/build/android/pylib/linker/test_runner.py
new file mode 100644
index 0000000..d345952
--- /dev/null
+++ b/build/android/pylib/linker/test_runner.py
@@ -0,0 +1,97 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs linker tests on a particular device."""
+
+import logging
+import os.path
+import sys
+import traceback
+
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.base import base_test_runner
+from pylib.linker import test_case
+
+
+# Base name of the APK that must be built and installed for these tests.
+_PACKAGE_NAME = 'ChromiumLinkerTest'
+
+
+class LinkerExceptionTestResult(base_test_result.BaseTestResult):
+  """Test result corresponding to a python exception in a host-custom test."""
+
+  def __init__(self, test_name, exc_info):
+    """Constructs a LinkerExceptionTestResult object.
+
+    Args:
+      test_name: name of the test which raised an exception.
+      exc_info: exception info, ostensibly from sys.exc_info().
+    """
+    exc_type, exc_value, exc_traceback = exc_info
+    trace_info = ''.join(traceback.format_exception(exc_type, exc_value,
+                                                    exc_traceback))
+    log_msg = 'Exception:\n' + trace_info
+
+    super(LinkerExceptionTestResult, self).__init__(
+        test_name,
+        base_test_result.ResultType.FAIL,
+        log="%s %s" % (exc_type, log_msg))
+
+
+class LinkerTestRunner(base_test_runner.BaseTestRunner):
+  """Orchestrates running a set of linker tests.
+
+  Any Python exceptions in the tests are caught and translated into a failed
+  result, rather than being re-raised on the main thread.
+  """
+
+  #override
+  def __init__(self, device, tool):
+    """Creates a new LinkerTestRunner.
+
+    Args:
+      device: Attached android device.
+      tool: Name of the Valgrind tool.
+    """
+    super(LinkerTestRunner, self).__init__(device, tool)
+
+  #override
+  def InstallTestPackage(self):
+    apk_path = os.path.join(
+        constants.GetOutDirectory(), 'apks', '%s.apk' % _PACKAGE_NAME)
+
+    if not os.path.exists(apk_path):
+      raise Exception('%s not found, please build it' % apk_path)
+
+    self.device.Install(apk_path)
+
+  #override
+  def RunTest(self, test):
+    """Sets up and runs a test case.
+
+    Args:
+      test: An instance of a LinkerTestCaseBase subclass.
+
+    Returns:
+      A TestRunResults object which contains the result produced by the test
+      and, in the case of a failure, the test that should be retried.
+    """
+
+    assert isinstance(test, test_case.LinkerTestCaseBase)
+
+    try:
+      results = test.Run(self.device)
+    except Exception: # pylint: disable=broad-except
+      logging.exception('Caught exception while trying to run test: %s',
+                        test.tagged_name)
+      exc_info = sys.exc_info()
+      results = base_test_result.TestRunResults()
+      results.AddResult(LinkerExceptionTestResult(
+          test.tagged_name, exc_info))
+
+    if not results.DidRunPass():
+      return results, test
+    else:
+      return results, None
diff --git a/build/android/pylib/local/__init__.py b/build/android/pylib/local/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/build/android/pylib/local/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/pylib/local/device/__init__.py b/build/android/pylib/local/device/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/build/android/pylib/local/device/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/pylib/local/device/local_device_environment.py b/build/android/pylib/local/device/local_device_environment.py
new file mode 100644
index 0000000..fce1d2d
--- /dev/null
+++ b/build/android/pylib/local/device/local_device_environment.py
@@ -0,0 +1,135 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import datetime
+import logging
+import os
+import shutil
+import tempfile
+import threading
+
+from devil.android import device_blacklist
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android import logcat_monitor
+from devil.utils import file_utils
+from devil.utils import parallelizer
+from pylib import constants
+from pylib.base import environment
+
+
+def _DeviceCachePath(device):
+  file_name = 'device_cache_%s.json' % device.adb.GetDeviceSerial()
+  return os.path.join(constants.GetOutDirectory(), file_name)
+
+
+class LocalDeviceEnvironment(environment.Environment):
+
+  def __init__(self, args, _error_func):
+    super(LocalDeviceEnvironment, self).__init__()
+    self._blacklist = (device_blacklist.Blacklist(args.blacklist_file)
+                       if args.blacklist_file
+                       else None)
+    self._device_serial = args.test_device
+    self._devices_lock = threading.Lock()
+    self._devices = []
+    self._concurrent_adb = args.enable_concurrent_adb
+    self._enable_device_cache = args.enable_device_cache
+    self._logcat_monitors = []
+    self._logcat_output_dir = args.logcat_output_dir
+    self._logcat_output_file = args.logcat_output_file
+    self._max_tries = 1 + args.num_retries
+    self._skip_clear_data = args.skip_clear_data
+    self._tool_name = args.tool
+
+  #override
+  def SetUp(self):
+    available_devices = device_utils.DeviceUtils.HealthyDevices(
+        self._blacklist, enable_device_files_cache=self._enable_device_cache,
+        default_retries=self._max_tries - 1)
+    if not available_devices:
+      raise device_errors.NoDevicesError
+    if self._device_serial:
+      self._devices = [d for d in available_devices
+                       if d.adb.GetDeviceSerial() == self._device_serial]
+      if not self._devices:
+        raise device_errors.DeviceUnreachableError(
+            'Could not find device %r' % self._device_serial)
+    else:
+      self._devices = available_devices
+
+    if self._enable_device_cache:
+      for d in self._devices:
+        cache_path = _DeviceCachePath(d)
+        if os.path.exists(cache_path):
+          logging.info('Using device cache: %s', cache_path)
+          with open(cache_path) as f:
+            d.LoadCacheData(f.read())
+          # Delete cached file so that any exceptions cause it to be cleared.
+          os.unlink(cache_path)
+    if self._logcat_output_file:
+      self._logcat_output_dir = tempfile.mkdtemp()
+    if self._logcat_output_dir:
+      for d in self._devices:
+        logcat_file = os.path.join(
+            self._logcat_output_dir,
+            '%s_%s' % (d.adb.GetDeviceSerial(),
+                       datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S')))
+        monitor = logcat_monitor.LogcatMonitor(
+            d.adb, clear=True, output_file=logcat_file)
+        self._logcat_monitors.append(monitor)
+        monitor.Start()
+
+  @property
+  def concurrent_adb(self):
+    return self._concurrent_adb
+
+  @property
+  def devices(self):
+    if not self._devices:
+      raise device_errors.NoDevicesError()
+    return self._devices
+
+  @property
+  def max_tries(self):
+    return self._max_tries
+
+  @property
+  def parallel_devices(self):
+    return parallelizer.SyncParallelizer(self.devices)
+
+  @property
+  def skip_clear_data(self):
+    return self._skip_clear_data
+
+  @property
+  def tool(self):
+    return self._tool_name
+
+  #override
+  def TearDown(self):
+    # Write the cache even when not using it so that it will be ready the first
+    # time that it is enabled. Writing it every time is also necessary so that
+    # an invalid cache can be flushed just by disabling it for one run.
+    for d in self._devices:
+      cache_path = _DeviceCachePath(d)
+      with open(cache_path, 'w') as f:
+        f.write(d.DumpCacheData())
+        logging.info('Wrote device cache: %s', cache_path)
+    for m in self._logcat_monitors:
+      m.Stop()
+      m.Close()
+    if self._logcat_output_file:
+      file_utils.MergeFiles(
+          self._logcat_output_file,
+          [m.output_file for m in self._logcat_monitors])
+      shutil.rmtree(self._logcat_output_dir)
+
+  def BlacklistDevice(self, device, reason='local_device_failure'):
+    device_serial = device.adb.GetDeviceSerial()
+    if self._blacklist:
+      self._blacklist.Extend([device_serial], reason=reason)
+    with self._devices_lock:
+      self._devices = [d for d in self._devices if str(d) != device_serial]
+
diff --git a/build/android/pylib/local/device/local_device_gtest_run.py b/build/android/pylib/local/device/local_device_gtest_run.py
new file mode 100644
index 0000000..2630af9
--- /dev/null
+++ b/build/android/pylib/local/device/local_device_gtest_run.py
@@ -0,0 +1,370 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import itertools
+import logging
+import os
+import posixpath
+
+from devil.android import device_errors
+from devil.android import device_temp_file
+from devil.android import ports
+from devil.utils import reraiser_thread
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.gtest import gtest_test_instance
+from pylib.local import local_test_server_spawner
+from pylib.local.device import local_device_environment
+from pylib.local.device import local_device_test_run
+
+_COMMAND_LINE_FLAGS_SUPPORTED = True
+
+_MAX_INLINE_FLAGS_LENGTH = 50  # Arbitrarily chosen.
+_EXTRA_COMMAND_LINE_FILE = (
+    'org.chromium.native_test.NativeTestActivity.CommandLineFile')
+_EXTRA_COMMAND_LINE_FLAGS = (
+    'org.chromium.native_test.NativeTestActivity.CommandLineFlags')
+_EXTRA_TEST_LIST = (
+    'org.chromium.native_test.NativeTestInstrumentationTestRunner'
+        '.TestList')
+_EXTRA_TEST = (
+    'org.chromium.native_test.NativeTestInstrumentationTestRunner'
+        '.Test')
+
+_MAX_SHARD_SIZE = 256
+_SECONDS_TO_NANOS = int(1e9)
+
+# The amount of time a test executable may run before it gets killed.
+_TEST_TIMEOUT_SECONDS = 30*60
+
+# TODO(jbudorick): Move this up to the test instance if the net test server is
+# handled outside of the APK for the remote_device environment.
+_SUITE_REQUIRES_TEST_SERVER_SPAWNER = [
+  'components_browsertests', 'content_unittests', 'content_browsertests',
+  'net_unittests', 'unit_tests'
+]
+
+# No-op context manager. If we used Python 3, we could change this to
+# contextlib.ExitStack()
+class _NullContextManager(object):
+  def __enter__(self):
+    pass
+  def __exit__(self, *args):
+    pass
+
+
+# TODO(jbudorick): Move this inside _ApkDelegate once TestPackageApk is gone.
+def PullAppFilesImpl(device, package, files, directory):
+  device_dir = device.GetApplicationDataDirectory(package)
+  host_dir = os.path.join(directory, str(device))
+  for f in files:
+    device_file = posixpath.join(device_dir, f)
+    host_file = os.path.join(host_dir, *f.split(posixpath.sep))
+    host_file_base, ext = os.path.splitext(host_file)
+    for i in itertools.count():
+      host_file = '%s_%d%s' % (host_file_base, i, ext)
+      if not os.path.exists(host_file):
+        break
+    device.PullFile(device_file, host_file)
+
+
+def _ExtractTestsFromFilter(gtest_filter):
+  """Returns the list of tests specified by the given filter.
+
+  Returns:
+    The list of test name patterns, or None if the device should be queried
+    for the full test list instead.
+  """
+  # Empty means all tests, - means exclude filter.
+  if not gtest_filter or '-' in gtest_filter:
+    return None
+
+  patterns = gtest_filter.split(':')
+  # For a single pattern, allow it even if it has a wildcard so long as the
+  # wildcard comes at the end and there is at least one . to prove the scope is
+  # not too large.
+  # This heuristic is not necessarily faster, but normally is.
+  if len(patterns) == 1 and patterns[0].endswith('*'):
+    no_suffix = patterns[0].rstrip('*')
+    if '*' not in no_suffix and '.' in no_suffix:
+      return patterns
+
+  if '*' in gtest_filter:
+    return None
+  return patterns
+
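+# Illustrative behavior of _ExtractTestsFromFilter (filters are made up):
+#   'Foo.bar:Foo.baz' -> ['Foo.bar', 'Foo.baz']
+#   'Foo.*'           -> ['Foo.*']  (single trailing-wildcard pattern)
+#   'Foo.bar-Bar.baz' -> None  (exclusion filters require a device query)
+#   '*'               -> None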
+
+class _ApkDelegate(object):
+  def __init__(self, test_instance):
+    self._activity = test_instance.activity
+    self._apk_helper = test_instance.apk_helper
+    self._test_apk_incremental_install_script = (
+        test_instance.test_apk_incremental_install_script)
+    self._package = test_instance.package
+    self._runner = test_instance.runner
+    self._permissions = test_instance.permissions
+    self._suite = test_instance.suite
+    self._component = '%s/%s' % (self._package, self._runner)
+    self._extras = test_instance.extras
+
+  def Install(self, device):
+    if self._test_apk_incremental_install_script:
+      local_device_test_run.IncrementalInstall(device, self._apk_helper,
+          self._test_apk_incremental_install_script)
+    else:
+      device.Install(self._apk_helper, reinstall=True,
+                     permissions=self._permissions)
+
+  def Run(self, test, device, flags=None, **kwargs):
+    extras = dict(self._extras)
+
+    if ('timeout' in kwargs
+        and gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT not in extras):
+      # Make sure the instrumentation doesn't kill the test before the
+      # scripts do. The provided timeout value is in seconds, but the
+      # instrumentation deals with nanoseconds because that's how Android
+      # handles time.
+      extras[gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT] = int(
+          kwargs['timeout'] * _SECONDS_TO_NANOS)
+
+    command_line_file = _NullContextManager()
+    if flags:
+      if len(flags) > _MAX_INLINE_FLAGS_LENGTH:
+        command_line_file = device_temp_file.DeviceTempFile(device.adb)
+        device.WriteFile(command_line_file.name, '_ %s' % flags)
+        extras[_EXTRA_COMMAND_LINE_FILE] = command_line_file.name
+      else:
+        extras[_EXTRA_COMMAND_LINE_FLAGS] = flags
+
+    test_list_file = _NullContextManager()
+    if test:
+      if len(test) > 1:
+        test_list_file = device_temp_file.DeviceTempFile(device.adb)
+        device.WriteFile(test_list_file.name, '\n'.join(test))
+        extras[_EXTRA_TEST_LIST] = test_list_file.name
+      else:
+        extras[_EXTRA_TEST] = test[0]
+
+    with command_line_file, test_list_file:
+      try:
+        return device.StartInstrumentation(
+            self._component, extras=extras, raw=False, **kwargs)
+      except Exception:
+        device.ForceStop(self._package)
+        raise
+
+  def PullAppFiles(self, device, files, directory):
+    PullAppFilesImpl(device, self._package, files, directory)
+
+  def Clear(self, device):
+    device.ClearApplicationState(self._package, permissions=self._permissions)
+
+
+class _ExeDelegate(object):
+  def __init__(self, tr, dist_dir):
+    self._host_dist_dir = dist_dir
+    self._exe_file_name = os.path.basename(dist_dir)[:-len('__dist')]
+    self._device_dist_dir = posixpath.join(
+        constants.TEST_EXECUTABLE_DIR, os.path.basename(dist_dir))
+    self._test_run = tr
+
+  def Install(self, device):
+    # TODO(jbudorick): Look into merging this with normal data deps pushing if
+    # executables become supported on nonlocal environments.
+    device.PushChangedFiles([(self._host_dist_dir, self._device_dist_dir)],
+                            delete_device_stale=True)
+
+  def Run(self, test, device, flags=None, **kwargs):
+    tool = self._test_run.GetTool(device).GetTestWrapper()
+    if tool:
+      cmd = [tool]
+    else:
+      cmd = []
+    cmd.append(posixpath.join(self._device_dist_dir, self._exe_file_name))
+
+    if test:
+      cmd.append('--gtest_filter=%s' % ':'.join(test))
+    if flags:
+      # TODO(agrieve): This won't work if multiple flags are passed.
+      cmd.append(flags)
+    cwd = constants.TEST_EXECUTABLE_DIR
+
+    env = {
+      'LD_LIBRARY_PATH': self._device_dist_dir
+    }
+    try:
+      gcov_strip_depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
+      external = device.GetExternalStoragePath()
+      env['GCOV_PREFIX'] = '%s/gcov' % external
+      env['GCOV_PREFIX_STRIP'] = gcov_strip_depth
+    except (device_errors.CommandFailedError, KeyError):
+      pass
+
+    output = device.RunShellCommand(
+        cmd, cwd=cwd, env=env, check_return=True, large_output=True, **kwargs)
+    return output
+
+  def PullAppFiles(self, device, files, directory):
+    pass
+
+  def Clear(self, device):
+    device.KillAll(self._exe_file_name, blocking=True, timeout=30, quiet=True)
+
+
+class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
+
+  def __init__(self, env, test_instance):
+    assert isinstance(env, local_device_environment.LocalDeviceEnvironment)
+    assert isinstance(test_instance, gtest_test_instance.GtestTestInstance)
+    super(LocalDeviceGtestRun, self).__init__(env, test_instance)
+
+    if self._test_instance.apk:
+      self._delegate = _ApkDelegate(self._test_instance)
+    elif self._test_instance.exe_dist_dir:
+      self._delegate = _ExeDelegate(self, self._test_instance.exe_dist_dir)
+    self._crashes = set()
+    self._servers = collections.defaultdict(list)
+
+  #override
+  def TestPackage(self):
+    return self._test_instance.suite
+
+  #override
+  def SetUp(self):
+    @local_device_test_run.handle_shard_failures_with(
+        on_failure=self._env.BlacklistDevice)
+    def individual_device_set_up(dev):
+      def install_apk():
+        # Install test APK.
+        self._delegate.Install(dev)
+
+      def push_test_data():
+        # Push data dependencies.
+        external_storage = dev.GetExternalStoragePath()
+        data_deps = self._test_instance.GetDataDependencies()
+        host_device_tuples = [
+            (h, d if d is not None else external_storage)
+            for h, d in data_deps]
+        dev.PushChangedFiles(host_device_tuples)
+
+      def init_tool_and_start_servers():
+        tool = self.GetTool(dev)
+        tool.CopyFiles(dev)
+        tool.SetupEnvironment()
+
+        self._servers[str(dev)] = []
+        if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER:
+          self._servers[str(dev)].append(
+              local_test_server_spawner.LocalTestServerSpawner(
+                  ports.AllocateTestServerPort(), dev, tool))
+
+        for s in self._servers[str(dev)]:
+          s.SetUp()
+
+      steps = (install_apk, push_test_data, init_tool_and_start_servers)
+      if self._env.concurrent_adb:
+        reraiser_thread.RunAsync(steps)
+      else:
+        for step in steps:
+          step()
+
+    self._env.parallel_devices.pMap(individual_device_set_up)
+
+  #override
+  def _ShouldShard(self):
+    return True
+
+  #override
+  def _CreateShards(self, tests):
+    # _crashes are tests that might crash and make the tests in the same shard
+    # following the crashed testcase not run.
+    # Thus we need to create separate shards for each crashed testcase,
+    # so that other tests can be run.
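+    # For example (illustrative): with 2 devices, no known crashes, and
+    # tests = [t0, t1, t2, t3, t4, t5], the round-robin split below yields
+    # shards [t0, t2, t4] and [t1, t3, t5], each further chopped into chunks
+    # of at most _MAX_SHARD_SIZE tests.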
+    device_count = len(self._env.devices)
+    shards = []
+
+    # Add shards with only one suspect testcase.
+    shards += [[crash] for crash in self._crashes if crash in tests]
+
+    # Remove suspect testcases from the list of tests.
+    tests = [test for test in tests if test not in self._crashes]
+
+    for i in xrange(0, device_count):
+      unbounded_shard = tests[i::device_count]
+      shards += [unbounded_shard[j:j+_MAX_SHARD_SIZE]
+                 for j in xrange(0, len(unbounded_shard), _MAX_SHARD_SIZE)]
+    return shards
+
+  #override
+  def _GetTests(self):
+    if self._test_instance.extract_test_list_from_filter:
+      # When the exact list of tests to run is given via command-line (e.g. when
+      # locally iterating on a specific test), skip querying the device (which
+      # takes ~3 seconds).
+      tests = _ExtractTestsFromFilter(self._test_instance.gtest_filter)
+      if tests:
+        return tests
+
+    # Even when there's only one device, it still makes sense to retrieve the
+    # test list so that tests can be split up and run in batches rather than all
+    # at once (since test output is not streamed).
+    @local_device_test_run.handle_shard_failures_with(
+        on_failure=self._env.BlacklistDevice)
+    def list_tests(dev):
+      raw_test_list = self._delegate.Run(
+          None, dev, flags='--gtest_list_tests', timeout=30)
+      tests = gtest_test_instance.ParseGTestListTests(raw_test_list)
+      if not tests:
+        logging.info('No tests found. Output:')
+        for l in raw_test_list:
+          logging.info('  %s', l)
+      tests = self._test_instance.FilterTests(tests)
+      return tests
+
+    # Query all devices in case one fails.
+    test_lists = self._env.parallel_devices.pMap(list_tests).pGet(None)
+
+    # If all devices failed to list tests, raise an exception.
+    # Check that tl is not None and is not empty.
+    if all(not tl for tl in test_lists):
+      raise device_errors.CommandFailedError(
+          'Failed to list tests on any device')
+    return sorted(set().union(*[set(tl) for tl in test_lists if tl]))
+
+  #override
+  def _RunTest(self, device, test):
+    # Run the test.
+    timeout = (self._test_instance.shard_timeout
+               * self.GetTool(device).GetTimeoutScale())
+    output = self._delegate.Run(
+        test, device, flags=self._test_instance.test_arguments,
+        timeout=timeout, retries=0)
+    for s in self._servers[str(device)]:
+      s.Reset()
+    if self._test_instance.app_files:
+      self._delegate.PullAppFiles(device, self._test_instance.app_files,
+                                  self._test_instance.app_file_dir)
+    if not self._env.skip_clear_data:
+      self._delegate.Clear(device)
+
+    # Parse the output.
+    # TODO(jbudorick): Transition test scripts away from parsing stdout.
+    results = self._test_instance.ParseGTestOutput(output)
+
+    # Check whether there are any crashed testcases.
+    self._crashes.update(r.GetName() for r in results
+                         if r.GetType() == base_test_result.ResultType.CRASH)
+    return results
+
+  #override
+  def TearDown(self):
+    @local_device_test_run.handle_shard_failures
+    def individual_device_tear_down(dev):
+      for s in self._servers.get(str(dev), []):
+        s.TearDown()
+
+      tool = self.GetTool(dev)
+      tool.CleanUpEnvironment()
+
+    self._env.parallel_devices.pMap(individual_device_tear_down)
diff --git a/build/android/pylib/local/device/local_device_instrumentation_test_run.py b/build/android/pylib/local/device/local_device_instrumentation_test_run.py
new file mode 100644
index 0000000..2d574de
--- /dev/null
+++ b/build/android/pylib/local/device/local_device_instrumentation_test_run.py
@@ -0,0 +1,310 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import re
+import time
+
+from devil.android import device_errors
+from devil.android import flag_changer
+from devil.utils import reraiser_thread
+from pylib import valgrind_tools
+from pylib.base import base_test_result
+from pylib.local.device import local_device_test_run
+
+
+TIMEOUT_ANNOTATIONS = [
+  ('Manual', 10 * 60 * 60),
+  ('IntegrationTest', 30 * 60),
+  ('External', 10 * 60),
+  ('EnormousTest', 10 * 60),
+  ('LargeTest', 5 * 60),
+  ('MediumTest', 3 * 60),
+  ('SmallTest', 1 * 60),
+]
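+# The annotations above are ordered from longest to shortest timeout; the
+# first one present on a test determines its timeout (see
+# _GetTimeoutFromAnnotations below).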
+
+
+# TODO(jbudorick): Make this private once the instrumentation test_runner is
+# deprecated.
+def DidPackageCrashOnDevice(package_name, device):
+  # Dismiss any error dialogs. Limit the number in case we have an error
+  # loop or we are failing to dismiss.
+  try:
+    for _ in xrange(10):
+      package = device.DismissCrashDialogIfNeeded()
+      if not package:
+        return False
+      # Assume test package convention of ".test" suffix
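+      # e.g. the crashed package 'org.chromium.chrome' matches the test
+      # package name 'org.chromium.chrome.tests' (illustrative names).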
+      if package in package_name:
+        return True
+  except device_errors.CommandFailedError:
+    logging.exception('Error while attempting to dismiss crash dialog.')
+  return False
+
+
+_CURRENT_FOCUS_CRASH_RE = re.compile(
+    r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
+
+
+class LocalDeviceInstrumentationTestRun(
+    local_device_test_run.LocalDeviceTestRun):
+  def __init__(self, env, test_instance):
+    super(LocalDeviceInstrumentationTestRun, self).__init__(env, test_instance)
+    self._flag_changers = {}
+
+  def TestPackage(self):
+    return self._test_instance.suite
+
+  def SetUp(self):
+    def substitute_external_storage(d, external_storage):
+      if not d:
+        return external_storage
+      elif isinstance(d, list):
+        return '/'.join(p if p else external_storage for p in d)
+      else:
+        return d
+
+    @local_device_test_run.handle_shard_failures_with(
+        self._env.BlacklistDevice)
+    def individual_device_set_up(dev, host_device_tuples):
+      def install_apk():
+        if self._test_instance.apk_under_test:
+          if self._test_instance.apk_under_test_incremental_install_script:
+            local_device_test_run.IncrementalInstall(
+                dev,
+                self._test_instance.apk_under_test,
+                self._test_instance.apk_under_test_incremental_install_script)
+          else:
+            permissions = self._test_instance.apk_under_test.GetPermissions()
+            dev.Install(self._test_instance.apk_under_test,
+                        permissions=permissions)
+
+        if self._test_instance.test_apk_incremental_install_script:
+          local_device_test_run.IncrementalInstall(
+              dev,
+              self._test_instance.test_apk,
+              self._test_instance.test_apk_incremental_install_script)
+        else:
+          permissions = self._test_instance.test_apk.GetPermissions()
+          dev.Install(self._test_instance.test_apk, permissions=permissions)
+
+        for apk in self._test_instance.additional_apks:
+          dev.Install(apk)
+
+        # Set debug app in order to enable reading command line flags on user
+        # builds
+        if self._test_instance.flags:
+          if not self._test_instance.package_info:
+            logging.error("Couldn't set debug app: no package info")
+          elif not self._test_instance.package_info.package:
+            logging.error("Couldn't set debug app: no package defined")
+          else:
+            dev.RunShellCommand(['am', 'set-debug-app', '--persistent',
+                                  self._test_instance.package_info.package],
+                                check_return=True)
+
+      def push_test_data():
+        external_storage = dev.GetExternalStoragePath()
+        host_device_tuples_substituted = [
+            (h, substitute_external_storage(d, external_storage))
+            for h, d in host_device_tuples]
+        logging.info('instrumentation data deps:')
+        for h, d in host_device_tuples_substituted:
+          logging.info('%r -> %r', h, d)
+        dev.PushChangedFiles(host_device_tuples_substituted)
+
+      def create_flag_changer():
+        if self._test_instance.flags:
+          if not self._test_instance.package_info:
+            logging.error("Couldn't set flags: no package info")
+          elif not self._test_instance.package_info.cmdline_file:
+            logging.error("Couldn't set flags: no cmdline_file")
+          else:
+            self._CreateFlagChangerIfNeeded(dev)
+            logging.debug('Attempting to set flags: %r',
+                          self._test_instance.flags)
+            self._flag_changers[str(dev)].AddFlags(self._test_instance.flags)
+
+        valgrind_tools.SetChromeTimeoutScale(
+            dev, self._test_instance.timeout_scale)
+
+      steps = (install_apk, push_test_data, create_flag_changer)
+      if self._env.concurrent_adb:
+        reraiser_thread.RunAsync(steps)
+      else:
+        for step in steps:
+          step()
+
+    self._env.parallel_devices.pMap(
+        individual_device_set_up,
+        self._test_instance.GetDataDependencies())
+
+  def TearDown(self):
+    @local_device_test_run.handle_shard_failures_with(
+        self._env.BlacklistDevice)
+    def individual_device_tear_down(dev):
+      if str(dev) in self._flag_changers:
+        self._flag_changers[str(dev)].Restore()
+
+      # Remove package-specific configuration
+      dev.RunShellCommand(['am', 'clear-debug-app'], check_return=True)
+
+      valgrind_tools.SetChromeTimeoutScale(dev, None)
+
+    self._env.parallel_devices.pMap(individual_device_tear_down)
+
+  def _CreateFlagChangerIfNeeded(self, device):
+    if str(device) not in self._flag_changers:
+      self._flag_changers[str(device)] = flag_changer.FlagChanger(
+        device, self._test_instance.package_info.cmdline_file)
+
+  #override
+  def _CreateShards(self, tests):
+    return tests
+
+  #override
+  def _GetTests(self):
+    return self._test_instance.GetTests()
+
+  #override
+  def _GetTestName(self, test):
+    return '%s#%s' % (test['class'], test['method'])
+
+  def _GetTestNameForDisplay(self, test):
+    display_name = self._GetTestName(test)
+    flags = test['flags']
+    if flags.add:
+      display_name = '%s with {%s}' % (display_name, ' '.join(flags.add))
+    if flags.remove:
+      display_name = '%s without {%s}' % (display_name, ' '.join(flags.remove))
+    return display_name
+
+  #override
+  def _RunTest(self, device, test):
+    extras = {}
+
+    flags = None
+    test_timeout_scale = None
+    if isinstance(test, list):
+      if not self._test_instance.driver_apk:
+        raise Exception('driver_apk does not exist. '
+                        'Please build it and try again.')
+
+      def name_and_timeout(t):
+        n = self._GetTestName(t)
+        i = self._GetTimeoutFromAnnotations(t['annotations'], n)
+        return (n, i)
+
+      test_names, timeouts = zip(*(name_and_timeout(t) for t in test))
+
+      test_name = ','.join(test_names)
+      test_display_name = test_name
+      target = '%s/%s' % (
+          self._test_instance.driver_package,
+          self._test_instance.driver_name)
+      extras.update(
+          self._test_instance.GetDriverEnvironmentVars(
+              test_list=test_names))
+      timeout = sum(timeouts)
+    else:
+      test_name = self._GetTestName(test)
+      test_display_name = test_name
+      target = '%s/%s' % (
+          self._test_instance.test_package, self._test_instance.test_runner)
+      extras['class'] = test_name
+      if 'flags' in test:
+        flags = test['flags']
+        test_display_name = self._GetTestNameForDisplay(test)
+      timeout = self._GetTimeoutFromAnnotations(
+        test['annotations'], test_display_name)
+
+      test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
+          test['annotations'])
+      if test_timeout_scale and test_timeout_scale != 1:
+        valgrind_tools.SetChromeTimeoutScale(
+            device, test_timeout_scale * self._test_instance.timeout_scale)
+
+    logging.info('preparing to run %s: %s', test_display_name, test)
+
+    if flags:
+      self._CreateFlagChangerIfNeeded(device)
+      self._flag_changers[str(device)].PushFlags(
+        add=flags.add, remove=flags.remove)
+
+    try:
+      time_ms = lambda: int(time.time() * 1e3)
+      start_ms = time_ms()
+      output = device.StartInstrumentation(
+          target, raw=True, extras=extras, timeout=timeout, retries=0)
+      duration_ms = time_ms() - start_ms
+    finally:
+      if flags:
+        self._flag_changers[str(device)].Restore()
+      if test_timeout_scale:
+        valgrind_tools.SetChromeTimeoutScale(
+            device, self._test_instance.timeout_scale)
+
+    # TODO(jbudorick): Make instrumentation tests output a JSON so this
+    # doesn't have to parse the output.
+    result_code, result_bundle, statuses = (
+        self._test_instance.ParseAmInstrumentRawOutput(output))
+    results = self._test_instance.GenerateTestResults(
+        result_code, result_bundle, statuses, start_ms, duration_ms)
+    if flags:
+      for r in results:
+        if r.GetName() == test_name:
+          r.SetName(test_display_name)
+    if DidPackageCrashOnDevice(self._test_instance.test_package, device):
+      for r in results:
+        if r.GetType() == base_test_result.ResultType.UNKNOWN:
+          r.SetType(base_test_result.ResultType.CRASH)
+
+    if any(r.GetType() not in (base_test_result.ResultType.PASS,
+                               base_test_result.ResultType.SKIP)
+           for r in results):
+      logging.info('detected failure in %s. raw output:', test_display_name)
+      for l in output:
+        logging.info('  %s', l)
+      if (not self._env.skip_clear_data
+          and self._test_instance.package_info):
+        permissions = (
+            self._test_instance.apk_under_test.GetPermissions()
+            if self._test_instance.apk_under_test
+            else None)
+        device.ClearApplicationState(self._test_instance.package_info.package,
+                                     permissions=permissions)
+
+    else:
+      logging.debug('raw output from %s:', test_display_name)
+      for l in output:
+        logging.debug('  %s', l)
+
+    return results
+
+  #override
+  def _ShouldShard(self):
+    return True
+
+  @classmethod
+  def _GetTimeoutScaleFromAnnotations(cls, annotations):
+    try:
+      return int(annotations.get('TimeoutScale', 1))
+    except ValueError as e:
+      logging.warning("Non-integer value of TimeoutScale ignored. (%s)", str(e))
+      return 1
+
+  @classmethod
+  def _GetTimeoutFromAnnotations(cls, annotations, test_name):
+    for k, v in TIMEOUT_ANNOTATIONS:
+      if k in annotations:
+        timeout = v
+        break
+    else:
+      logging.warning('Using default 1 minute timeout for %s', test_name)
+      timeout = 60
+
+    timeout *= cls._GetTimeoutScaleFromAnnotations(annotations)
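+    # For example (illustrative): a test annotated with 'LargeTest' and
+    # TimeoutScale=2 ends up with 5 * 60 * 2 = 600 seconds.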
+
+    return timeout
+
diff --git a/build/android/pylib/local/device/local_device_test_run.py b/build/android/pylib/local/device/local_device_test_run.py
new file mode 100644
index 0000000..bf4bb65
--- /dev/null
+++ b/build/android/pylib/local/device/local_device_test_run.py
@@ -0,0 +1,195 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import fnmatch
+import functools
+import imp
+import logging
+
+from devil import base_error
+from devil.android import device_errors
+from pylib import valgrind_tools
+from pylib.base import base_test_result
+from pylib.base import test_run
+from pylib.base import test_collection
+
+
+def IncrementalInstall(device, apk_helper, installer_script):
+  """Performs an incremental install.
+
+  Args:
+    device: Device to install on.
+    apk_helper: ApkHelper instance for the _incremental.apk.
+    installer_script: Path to the installer script for the incremental apk.
+  """
+  try:
+    install_wrapper = imp.load_source('install_wrapper', installer_script)
+  except IOError:
+    raise Exception('Incremental install script not found: %s\n' %
+                    installer_script)
+  params = install_wrapper.GetInstallParameters()
+
+  from incremental_install import installer
+  installer.Install(device, apk_helper, split_globs=params['splits'],
+                    native_libs=params['native_libs'],
+                    dex_files=params['dex_files'],
+                    permissions=None)  # Auto-grant permissions from manifest.
+
+
+def handle_shard_failures(f):
+  """A decorator that handles device failures for per-device functions.
+
+  Args:
+    f: the function being decorated. The function must take at least one
+      argument, and that argument must be the device.
+  """
+  return handle_shard_failures_with(None)(f)
+
+
+def handle_shard_failures_with(on_failure):
+  """A decorator that handles device failures for per-device functions.
+
+  This calls on_failure in the event of a failure.
+
+  Args:
+    on_failure: A binary function to call on failure; it receives the device
+      and the name of the decorated function. The decorated function must
+      take at least one argument, and that argument must be the device.
+  """
+  def decorator(f):
+    @functools.wraps(f)
+    def wrapper(dev, *args, **kwargs):
+      try:
+        return f(dev, *args, **kwargs)
+      except device_errors.CommandTimeoutError:
+        logging.exception('Shard timed out: %s(%s)', f.__name__, str(dev))
+      except device_errors.DeviceUnreachableError:
+        logging.exception('Shard died: %s(%s)', f.__name__, str(dev))
+      except base_error.BaseError:
+        logging.exception('Shard failed: %s(%s)', f.__name__,
+                          str(dev))
+      if on_failure:
+        on_failure(dev, f.__name__)
+      return None
+
+    return wrapper
+
+  return decorator
+
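+# Illustrative usage sketch (hypothetical per-device step, not part of this
+# change):
+#
+#   @handle_shard_failures_with(on_failure=env.BlacklistDevice)
+#   def per_device_step(dev):
+#     dev.WaitUntilFullyBooted()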
+
+class LocalDeviceTestRun(test_run.TestRun):
+
+  def __init__(self, env, test_instance):
+    super(LocalDeviceTestRun, self).__init__(env, test_instance)
+    self._tools = {}
+
+  #override
+  def RunTests(self):
+    tests = self._GetTests()
+
+    @handle_shard_failures
+    def run_tests_on_device(dev, tests, results):
+      for test in tests:
+        result = None
+        try:
+          result = self._RunTest(dev, test)
+          if isinstance(result, base_test_result.BaseTestResult):
+            results.AddResult(result)
+          elif isinstance(result, list):
+            results.AddResults(result)
+          else:
+            raise Exception(
+                'Unexpected result type: %s' % type(result).__name__)
+        except:
+          if isinstance(tests, test_collection.TestCollection):
+            tests.add(test)
+          raise
+        finally:
+          if isinstance(tests, test_collection.TestCollection):
+            tests.test_completed()
+
+      logging.info('Finished running tests on this device.')
+
+    tries = 0
+    results = base_test_result.TestRunResults()
+    all_fail_results = {}
+    while tries < self._env.max_tries and tests:
+      logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
+      logging.info('Will run %d tests on %d devices: %s',
+                   len(tests), len(self._env.devices),
+                   ', '.join(str(d) for d in self._env.devices))
+      for t in tests:
+        logging.debug('  %s', t)
+
+      try_results = base_test_result.TestRunResults()
+      if self._ShouldShard():
+        tc = test_collection.TestCollection(self._CreateShards(tests))
+        self._env.parallel_devices.pMap(
+            run_tests_on_device, tc, try_results).pGet(None)
+      else:
+        self._env.parallel_devices.pMap(
+            run_tests_on_device, tests, try_results).pGet(None)
+
+      for result in try_results.GetAll():
+        if result.GetType() in (base_test_result.ResultType.PASS,
+                                base_test_result.ResultType.SKIP):
+          results.AddResult(result)
+        else:
+          all_fail_results[result.GetName()] = result
+
+      results_names = set(r.GetName() for r in results.GetAll())
+
+      def has_test_result(name):
+        # When specifying a test filter, names can contain trailing wildcards.
+        # See local_device_gtest_run._ExtractTestsFromFilter()
+        if name.endswith('*'):
+          return any(fnmatch.fnmatch(n, name) for n in results_names)
+        return name in results_names
+
+      tests = [t for t in tests if not has_test_result(self._GetTestName(t))]
+      tries += 1
+      logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries)
+      if tests:
+        logging.info('%d failed tests remain.', len(tests))
+      else:
+        logging.info('All tests completed.')
+
+    all_unknown_test_names = set(self._GetTestName(t) for t in tests)
+    all_failed_test_names = set(all_fail_results.iterkeys())
+
+    unknown_tests = all_unknown_test_names.difference(all_failed_test_names)
+    failed_tests = all_failed_test_names.intersection(all_unknown_test_names)
+
+    if unknown_tests:
+      results.AddResults(
+          base_test_result.BaseTestResult(
+              u, base_test_result.ResultType.UNKNOWN)
+          for u in unknown_tests)
+    if failed_tests:
+      results.AddResults(all_fail_results[f] for f in failed_tests)
+
+    return results
+
+  def GetTool(self, device):
+    if str(device) not in self._tools:
+      self._tools[str(device)] = valgrind_tools.CreateTool(
+          self._env.tool, device)
+    return self._tools[str(device)]
+
+  def _CreateShards(self, tests):
+    raise NotImplementedError
+
+  # pylint: disable=no-self-use
+  def _GetTestName(self, test):
+    return test
+
+  def _GetTests(self):
+    raise NotImplementedError
+
+  def _RunTest(self, device, test):
+    raise NotImplementedError
+
+  def _ShouldShard(self):
+    raise NotImplementedError
diff --git a/build/android/pylib/local/local_test_server_spawner.py b/build/android/pylib/local/local_test_server_spawner.py
new file mode 100644
index 0000000..db9fbfd
--- /dev/null
+++ b/build/android/pylib/local/local_test_server_spawner.py
@@ -0,0 +1,45 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from devil.android import forwarder
+from pylib import chrome_test_server_spawner
+from pylib.base import test_server
+
+
+class LocalTestServerSpawner(test_server.TestServer):
+
+  def __init__(self, port, device, tool):
+    super(LocalTestServerSpawner, self).__init__()
+    self._device = device
+    self._spawning_server = chrome_test_server_spawner.SpawningServer(
+        port, device, tool)
+    self._tool = tool
+
+  @property
+  def server_address(self):
+    return self._spawning_server.server.server_address
+
+  @property
+  def port(self):
+    return self.server_address[1]
+
+  #override
+  def SetUp(self):
+    self._device.WriteFile(
+        '%s/net-test-server-ports' % self._device.GetExternalStoragePath(),
+        '%s:0' % str(self.port))
+    forwarder.Forwarder.Map(
+        [(self.port, self.port)], self._device, self._tool)
+    self._spawning_server.Start()
+
+  #override
+  def Reset(self):
+    self._spawning_server.CleanupState()
+
+  #override
+  def TearDown(self):
+    self.Reset()
+    self._spawning_server.Stop()
+    forwarder.Forwarder.UnmapDevicePort(self.port, self._device)
+
diff --git a/build/android/pylib/monkey/__init__.py b/build/android/pylib/monkey/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/build/android/pylib/monkey/__init__.py
diff --git a/build/android/pylib/monkey/setup.py b/build/android/pylib/monkey/setup.py
new file mode 100644
index 0000000..fe690a5
--- /dev/null
+++ b/build/android/pylib/monkey/setup.py
@@ -0,0 +1,27 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Generates test runner factory and tests for monkey tests."""
+
+from pylib.monkey import test_runner
+
+
+def Setup(test_options):
+  """Create and return the test runner factory and tests.
+
+  Args:
+    test_options: A MonkeyOptions object.
+
+  Returns:
+    A tuple of (TestRunnerFactory, tests).
+  """
+  # Token to replicate across devices as the "test". The TestRunner does all of
+  # the work to run the test.
+  tests = ['MonkeyTest']
+
+  def TestRunnerFactory(device, shard_index):
+    return test_runner.TestRunner(
+        test_options, device, shard_index)
+
+  return (TestRunnerFactory, tests)
diff --git a/build/android/pylib/monkey/test_options.py b/build/android/pylib/monkey/test_options.py
new file mode 100644
index 0000000..54d3d08
--- /dev/null
+++ b/build/android/pylib/monkey/test_options.py
@@ -0,0 +1,16 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Defines the MonkeyOptions named tuple."""
+
+import collections
+
+MonkeyOptions = collections.namedtuple('MonkeyOptions', [
+    'verbose_count',
+    'package',
+    'event_count',
+    'category',
+    'throttle',
+    'seed',
+    'extra_args'])
diff --git a/build/android/pylib/monkey/test_runner.py b/build/android/pylib/monkey/test_runner.py
new file mode 100644
index 0000000..ff4c940
--- /dev/null
+++ b/build/android/pylib/monkey/test_runner.py
@@ -0,0 +1,110 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs a monkey test on a single device."""
+
+import logging
+import random
+
+from devil.android import device_errors
+from devil.android.sdk import intent
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.base import base_test_runner
+
+_CHROME_PACKAGE = constants.PACKAGE_INFO['chrome'].package
+
+class TestRunner(base_test_runner.BaseTestRunner):
+  """A TestRunner instance runs a monkey test on a single device."""
+
+  def __init__(self, test_options, device, _):
+    super(TestRunner, self).__init__(device, None)
+    self._options = test_options
+    self._package = constants.PACKAGE_INFO[self._options.package].package
+    self._activity = constants.PACKAGE_INFO[self._options.package].activity
+
+  def _LaunchMonkeyTest(self):
+    """Runs monkey test for a given package.
+
+    Returns:
+      Output from the monkey command on the device.
+    """
+
+    # |throttle| is in milliseconds, so convert the estimated run time to
+    # seconds for the shell command timeout.
+    timeout = self._options.event_count * self._options.throttle * 1.5 / 1000
+
+    cmd = ['monkey',
+           '-p %s' % self._package,
+           ' '.join(['-c %s' % c for c in self._options.category]),
+           '--throttle %d' % self._options.throttle,
+           '-s %d' % (self._options.seed or random.randint(1, 100)),
+           '-v ' * self._options.verbose_count,
+           '--monitor-native-crashes',
+           '--kill-process-after-error',
+           self._options.extra_args,
+           '%d' % self._options.event_count]
+    return self.device.RunShellCommand(' '.join(cmd), timeout=timeout_ms)
+
+  def RunTest(self, test_name):
+    """Run a Monkey test on the device.
+
+    Args:
+      test_name: String to use for logging the test result.
+
+    Returns:
+      A tuple of (TestRunResults, retry).
+    """
+    self.device.StartActivity(
+        intent.Intent(package=self._package, activity=self._activity,
+                      action='android.intent.action.MAIN'),
+        blocking=True, force_stop=True)
+
+    # Chrome crashes are not always caught by Monkey test runner.
+    # Verify Chrome has the same PID before and after the test.
+    before_pids = self.device.GetPids(self._package)
+
+    # Run the test.
+    output = ''
+    if before_pids:
+      if len(before_pids.get(self._package, [])) > 1:
+        raise Exception(
+            'At most one instance of process %s expected but found pids: '
+            '%s' % (self._package, before_pids))
+      output = '\n'.join(self._LaunchMonkeyTest())
+      after_pids = self.device.GetPids(self._package)
+
+    crashed = True
+    if self._package not in before_pids:
+      logging.error('Failed to start the process.')
+    elif self._package not in after_pids:
+      logging.error('Process %s (pid(s) %s) has died.',
+                    self._package, before_pids[self._package])
+    elif before_pids[self._package] != after_pids[self._package]:
+      logging.error('Detected process restart %s -> %s',
+                    before_pids[self._package], after_pids[self._package])
+    else:
+      crashed = False
+
+    results = base_test_result.TestRunResults()
+    success_pattern = 'Events injected: %d' % self._options.event_count
+    if success_pattern in output and not crashed:
+      result = base_test_result.BaseTestResult(
+          test_name, base_test_result.ResultType.PASS, log=output)
+    else:
+      result = base_test_result.BaseTestResult(
+          test_name, base_test_result.ResultType.FAIL, log=output)
+      if 'chrome' in self._options.package:
+        logging.warning('Starting MinidumpUploadService...')
+        # TODO(jbudorick): Update this after upstreaming.
+        minidump_intent = intent.Intent(
+            action='%s.crash.ACTION_FIND_ALL' % _CHROME_PACKAGE,
+            package=self._package,
+            activity='%s.crash.MinidumpUploadService' % _CHROME_PACKAGE)
+        try:
+          self.device.RunShellCommand(
+              ['am', 'startservice'] + minidump_intent.am_args,
+              as_root=True, check_return=True)
+        except device_errors.CommandFailedError:
+          logging.exception('Failed to start MinidumpUploadService')
+
+    results.AddResult(result)
+    return results, False
diff --git a/build/android/pylib/perf/__init__.py b/build/android/pylib/perf/__init__.py
new file mode 100644
index 0000000..9228df8
--- /dev/null
+++ b/build/android/pylib/perf/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/pylib/perf/cache_control.py b/build/android/pylib/perf/cache_control.py
new file mode 100644
index 0000000..8b46575
--- /dev/null
+++ b/build/android/pylib/perf/cache_control.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.perf.cache_control import *
diff --git a/build/android/pylib/perf/perf_control.py b/build/android/pylib/perf/perf_control.py
new file mode 100644
index 0000000..d95d7b7
--- /dev/null
+++ b/build/android/pylib/perf/perf_control.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.perf.perf_control import *
diff --git a/build/android/pylib/perf/setup.py b/build/android/pylib/perf/setup.py
new file mode 100644
index 0000000..31db14f
--- /dev/null
+++ b/build/android/pylib/perf/setup.py
@@ -0,0 +1,105 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Generates test runner factory and tests for performance tests."""
+
+import json
+import fnmatch
+import logging
+import os
+import shutil
+
+from devil.android import device_list
+from devil.android import device_utils
+from pylib import constants
+from pylib.perf import test_runner
+from pylib.utils import test_environment
+
+
+def _GetAllDevices(active_devices, devices_path):
+  # TODO(rnephew): Delete this when recipes change to pass file path.
+  if not devices_path:
+    logging.warning('Known devices file path not being passed. For device '
+                    'affinity to work properly, it must be passed.')
+    devices_path = os.path.join(os.environ.get('CHROMIUM_OUT_DIR', 'out'),
+                                device_list.LAST_DEVICES_FILENAME)
+  try:
+    # devices_path is always set by this point, so the persisted device list
+    # is preferred; fall back to active_devices only if it cannot be read.
+    devices = [device_utils.DeviceUtils(s)
+               for s in device_list.GetPersistentDeviceList(devices_path)]
+  except IOError as e:
+    logging.error('Unable to find %s [%s]', devices_path, e)
+    devices = active_devices
+  return sorted(devices)
+
+
+def _GetStepsDictFromSingleStep(test_options):
+  # Running a single command, build the tests structure.
+  steps_dict = {
+    'version': 1,
+    'steps': {
+        'single_step': {
+          'device_affinity': 0,
+          'cmd': test_options.single_step
+        },
+    }
+  }
+  return steps_dict
+
+
+def _GetStepsDict(test_options):
+  if test_options.single_step:
+    return _GetStepsDictFromSingleStep(test_options)
+  if test_options.steps:
+    with file(test_options.steps, 'r') as f:
+      steps = json.load(f)
+
+      # Already using the new format.
+      assert steps['version'] == 1
+      return steps
+
+
+def Setup(test_options, active_devices):
+  """Create and return the test runner factory and tests.
+
+  Args:
+    test_options: A PerformanceOptions object.
+    active_devices: A list of DeviceUtils instances for currently attached
+      devices.
+
+  Returns:
+    A tuple of (TestRunnerFactory, tests, devices).
+  """
+  # TODO(bulach): remove this once the bot side lands. BUG=318369
+  constants.SetBuildType('Release')
+  if os.path.exists(constants.PERF_OUTPUT_DIR):
+    shutil.rmtree(constants.PERF_OUTPUT_DIR)
+  os.makedirs(constants.PERF_OUTPUT_DIR)
+
+  # Before running the tests, kill any leftover server.
+  test_environment.CleanupLeftoverProcesses(active_devices)
+
+  # We want to keep device affinity, so return all devices ever seen.
+  all_devices = _GetAllDevices(active_devices, test_options.known_devices_file)
+
+  steps_dict = _GetStepsDict(test_options)
+  sorted_step_names = sorted(steps_dict['steps'].keys())
+
+  if test_options.test_filter:
+    sorted_step_names = fnmatch.filter(sorted_step_names,
+                                       test_options.test_filter)
+
+  flaky_steps = []
+  if test_options.flaky_steps:
+    with file(test_options.flaky_steps, 'r') as f:
+      flaky_steps = json.load(f)
+
+  def TestRunnerFactory(device, shard_index):
+    return test_runner.TestRunner(
+        test_options, device, shard_index, len(all_devices),
+        steps_dict, flaky_steps)
+
+  return (TestRunnerFactory, sorted_step_names, all_devices)
diff --git a/build/android/pylib/perf/surface_stats_collector.py b/build/android/pylib/perf/surface_stats_collector.py
new file mode 100644
index 0000000..98b7fed
--- /dev/null
+++ b/build/android/pylib/perf/surface_stats_collector.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.perf.surface_stats_collector import *
diff --git a/build/android/pylib/perf/test_options.py b/build/android/pylib/perf/test_options.py
new file mode 100644
index 0000000..4c923f3
--- /dev/null
+++ b/build/android/pylib/perf/test_options.py
@@ -0,0 +1,24 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Defines the PerfOptions named tuple."""
+
+import collections
+
+PerfOptions = collections.namedtuple('PerfOptions', [
+    'steps',
+    'flaky_steps',
+    'output_json_list',
+    'print_step',
+    'no_timeout',
+    'test_filter',
+    'dry_run',
+    'single_step',
+    'collect_chartjson_data',
+    'output_chartjson_data',
+    'get_output_dir_archive',
+    'max_battery_temp',
+    'min_battery_level',
+    'known_devices_file',
+])
diff --git a/build/android/pylib/perf/test_runner.py b/build/android/pylib/perf/test_runner.py
new file mode 100644
index 0000000..2d45c48
--- /dev/null
+++ b/build/android/pylib/perf/test_runner.py
@@ -0,0 +1,453 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs perf tests.
+
+Our buildbot infrastructure requires each slave to run steps serially.
+This is sub-optimal for Android, where these steps can run independently on
+multiple connected devices.
+
+The buildbots will run this script multiple times per cycle:
+- First: all steps listed in --steps will be executed in parallel using all
+connected devices. Step results will be pickled to disk. Each step has a unique
+name. The result code will be ignored if the step name is listed in
+--flaky-steps.
+The buildbot will treat this step as a regular step, and will not process any
+graph data.
+
+- Then, with --print-step STEP_NAME: at this stage, we'll simply print the file
+with the step results previously saved. The buildbot will then process the graph
+data accordingly.
+
+The JSON steps file contains a dictionary in the format:
+{ "version": int,
+  "steps": {
+    "foo": {
+      "device_affinity": int,
+      "cmd": "script_to_execute foo"
+    },
+    "bar": {
+      "device_affinity": int,
+      "cmd": "script_to_execute bar"
+    }
+  }
+}
+
+The JSON flaky steps file contains a list of step names whose results should
+be ignored:
+[
+  "step_name_foo",
+  "step_name_bar"
+]
+
+Note that script_to_execute must accept at least the following
+option:
+  --device: the serial number to be passed to all adb commands.
+"""
+
+import collections
+import io
+import json
+import logging
+import os
+import pickle
+import re
+import shutil
+import sys
+import tempfile
+import threading
+import time
+import zipfile
+
+from devil.android import battery_utils
+from devil.android import device_errors
+from devil.android import forwarder
+from devil.constants import exit_codes
+from devil.utils import cmd_helper
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.base import base_test_runner
+from pylib.constants import host_paths
+
+
+# Regex for the master branch commit position.
+_GIT_CR_POS_RE = re.compile(r'^Cr-Commit-Position: refs/heads/master@{#(\d+)}$')
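+# Example matched line: 'Cr-Commit-Position: refs/heads/master@{#356174}'.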
+
+
+def _GetChromiumRevision():
+  # pylint: disable=line-too-long
+  """Get the git hash and commit position of the chromium master branch.
+
+  See: https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/slave/runtest.py#212
+
+  Returns:
+    A dictionary with 'revision' and 'commit_pos' keys.
+  """
+  # pylint: enable=line-too-long
+  status, output = cmd_helper.GetCmdStatusAndOutput(
+      ['git', 'log', '-n', '1', '--pretty=format:%H%n%B', 'HEAD'],
+      host_paths.DIR_SOURCE_ROOT)
+  revision = None
+  commit_pos = None
+  if not status:
+    lines = output.splitlines()
+    revision = lines[0]
+    for line in reversed(lines):
+      m = _GIT_CR_POS_RE.match(line.strip())
+      if m:
+        commit_pos = int(m.group(1))
+        break
+  return {'revision': revision, 'commit_pos': commit_pos}
+
+
+def GetPersistedResult(test_name):
+  file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
+  if not os.path.exists(file_name):
+    logging.error('File not found %s', file_name)
+    return None
+
+  with file(file_name, 'r') as f:
+    return pickle.loads(f.read())
+
+
+def OutputJsonList(json_input, json_output):
+  with file(json_input, 'r') as i:
+    all_steps = json.load(i)
+
+  step_values = []
+  for k, v in all_steps['steps'].iteritems():
+    data = {'test': k, 'device_affinity': v['device_affinity']}
+
+    persisted_result = GetPersistedResult(k)
+    if persisted_result:
+      data['start_time'] = persisted_result['start_time']
+      data['end_time'] = persisted_result['end_time']
+      data['total_time'] = persisted_result['total_time']
+      data['has_archive'] = persisted_result['archive_bytes'] is not None
+    step_values.append(data)
+
+  with file(json_output, 'w') as o:
+    o.write(json.dumps(step_values))
+  return 0
+
+
+def PrintTestOutput(test_name, json_file_name=None, archive_file_name=None):
+  """Helper method to print the output of previously executed test_name.
+
+  Args:
+    test_name: name of the test that has been previously executed.
+    json_file_name: name of the file to output chartjson data to.
+    archive_file_name: name of the file to write the compressed ZIP archive.
+
+  Returns:
+    exit code generated by the test step.
+  """
+  persisted_result = GetPersistedResult(test_name)
+  if not persisted_result:
+    return exit_codes.INFRA
+  logging.info('*' * 80)
+  logging.info('Output from:')
+  logging.info(persisted_result['cmd'])
+  logging.info('*' * 80)
+
+  output_formatted = ''
+  persisted_outputs = persisted_result['output']
+  for i, persisted_output in enumerate(persisted_outputs):
+    output_formatted += '\n\nOutput from run #%d:\n\n%s' % (
+        i, persisted_output)
+  print output_formatted
+
+  if json_file_name:
+    with file(json_file_name, 'w') as f:
+      f.write(persisted_result['chartjson'])
+
+  if archive_file_name:
+    if persisted_result['archive_bytes'] is not None:
+      with file(archive_file_name, 'wb') as f:
+        f.write(persisted_result['archive_bytes'])
+    else:
+      logging.error('The output dir was not archived.')
+
+  return persisted_result['exit_code']
+
+
+def PrintSummary(test_names):
+  logging.info('*' * 80)
+  logging.info('Sharding summary')
+  device_total_time = collections.defaultdict(int)
+  for test_name in test_names:
+    file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
+    if not os.path.exists(file_name):
+      logging.info('%s : No status file found', test_name)
+      continue
+    with file(file_name, 'r') as f:
+      result = pickle.loads(f.read())
+    logging.info('%s : exit_code=%d in %d secs at %s',
+                 result['name'], result['exit_code'], result['total_time'],
+                 result['device'])
+    device_total_time[result['device']] += result['total_time']
+  for device, device_time in device_total_time.iteritems():
+    logging.info('Total for device %s : %d secs', device, device_time)
+  logging.info('Total steps time: %d secs', sum(device_total_time.values()))
+
+
+class _HeartBeatLogger(object):
+  # How often to print the heartbeat on flush().
+  _PRINT_INTERVAL = 30.0
+
+  def __init__(self):
+    """A file-like class for keeping the buildbot alive."""
+    self._len = 0
+    self._tick = time.time()
+    self._stopped = threading.Event()
+    self._timer = threading.Thread(target=self._runner)
+    self._timer.start()
+
+  def _runner(self):
+    while not self._stopped.is_set():
+      self.flush()
+      self._stopped.wait(_HeartBeatLogger._PRINT_INTERVAL)
+
+  def write(self, data):
+    self._len += len(data)
+
+  def flush(self):
+    now = time.time()
+    if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL:
+      self._tick = now
+      print '--single-step output length %d' % self._len
+      sys.stdout.flush()
+
+  def stop(self):
+    self._stopped.set()
+
+
+class TestRunner(base_test_runner.BaseTestRunner):
+  def __init__(self, test_options, device, shard_index, max_shard, tests,
+      flaky_tests):
+    """A TestRunner instance runs a perf test on a single device.
+
+    Args:
+      test_options: A PerfOptions object.
+      device: Device to run the tests on.
+      shard_index: the index of this device.
+      max_shard: the total number of shards.
+      tests: a dict mapping test_name to command.
+      flaky_tests: a list of flaky test names.
+    """
+    super(TestRunner, self).__init__(device, None)
+    self._options = test_options
+    self._shard_index = shard_index
+    self._max_shard = max_shard
+    self._tests = tests
+    self._flaky_tests = flaky_tests
+    self._output_dir = None
+    self._device_battery = battery_utils.BatteryUtils(self.device)
+
+  @staticmethod
+  def _SaveResult(result):
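+    # Merge with any previously pickled result for this step so that output
+    # from every attempt is preserved for PrintTestOutput.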
+    pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name'])
+    if os.path.exists(pickled):
+      with file(pickled, 'r') as f:
+        previous = pickle.loads(f.read())
+        result['output'] = previous['output'] + result['output']
+
+    with file(pickled, 'w') as f:
+      f.write(pickle.dumps(result))
+
+  def _CheckDeviceAffinity(self, test_name):
+    """Returns True if test_name has affinity for this shard."""
+    affinity = (self._tests['steps'][test_name]['device_affinity'] %
+                self._max_shard)
+    if self._shard_index == affinity:
+      return True
+    logging.info('Skipping %s on %s (affinity is %s, device is %s)',
+                 test_name, self.device_serial, affinity, self._shard_index)
+    return False
+
+  def _CleanupOutputDirectory(self):
+    if self._output_dir:
+      shutil.rmtree(self._output_dir, ignore_errors=True)
+      self._output_dir = None
+
+  def _ReadChartjsonOutput(self):
+    if not self._output_dir:
+      return ''
+
+    json_output_path = os.path.join(self._output_dir, 'results-chart.json')
+    try:
+      with open(json_output_path) as f:
+        return f.read()
+    except IOError:
+      logging.exception('Exception when reading chartjson.')
+      logging.error('This usually means that telemetry did not run, so it could'
+                    ' not generate the file. Please check the device running'
+                    ' the test.')
+      return ''
+
+  def _WriteBuildBotJson(self):
+    """Write metadata about the buildbot environment to the output dir."""
+    data = {
+      'chromium': _GetChromiumRevision(),
+      'environment': dict(os.environ)}
+    logging.info('BuildBot environment: %s', data)
+    with open(os.path.join(self._output_dir, 'buildbot.json'), 'w') as f:
+      json.dump(data, f, sort_keys=True, indent=2, separators=(',', ': '))
+
+  def _ArchiveOutputDir(self):
+    """Archive all files in the output dir, and return as compressed bytes."""
+    with io.BytesIO() as archive:
+      with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as contents:
+        num_files = 0
+        for absdir, _, files in os.walk(self._output_dir):
+          reldir = os.path.relpath(absdir, self._output_dir)
+          for filename in files:
+            src_path = os.path.join(absdir, filename)
+            # We use normpath to turn './file.txt' into just 'file.txt'.
+            dst_path = os.path.normpath(os.path.join(reldir, filename))
+            contents.write(src_path, dst_path)
+            num_files += 1
+      if num_files:
+        logging.info('%d files in the output dir were archived.', num_files)
+      else:
+        logging.warning('No files in the output dir. Archive is empty.')
+      return archive.getvalue()
+
+  def _LaunchPerfTest(self, test_name):
+    """Runs a perf test.
+
+    Args:
+      test_name: the name of the test to be executed.
+
+    Returns:
+      A tuple containing (Output, base_test_result.ResultType)
+    """
+    if not self._CheckDeviceAffinity(test_name):
+      return '', base_test_result.ResultType.PASS
+
+    try:
+      logging.warning('Unmapping device ports')
+      forwarder.Forwarder.UnmapAllDevicePorts(self.device)
+      self.device.RestartAdbd()
+    except Exception as e: # pylint: disable=broad-except
+      logging.error('Exception when tearing down device %s', e)
+
+    test_config = self._tests['steps'][test_name]
+    cmd = ('%s --device %s' % (test_config['cmd'], self.device_serial))
+
+    if (self._options.collect_chartjson_data
+        or test_config.get('archive_output_dir')):
+      self._output_dir = tempfile.mkdtemp()
+      self._WriteBuildBotJson()
+      cmd = cmd + ' --output-dir=%s' % self._output_dir
+
+    logging.info(
+        'temperature: %s (0.1 C)',
+        str(self._device_battery.GetBatteryInfo().get('temperature')))
+    if self._options.max_battery_temp:
+      self._device_battery.LetBatteryCoolToTemperature(
+          self._options.max_battery_temp)
+
+    logging.info('Charge level: %s%%',
+        str(self._device_battery.GetBatteryInfo().get('level')))
+    if self._options.min_battery_level:
+      self._device_battery.ChargeDeviceToLevel(
+          self._options.min_battery_level)
+    self.device.SetScreen(True)
+
+    logging.info('%s : %s', test_name, cmd)
+    start_time = time.time()
+
+    timeout = test_config.get('timeout', 3600)
+    if self._options.no_timeout:
+      timeout = None
+    logging.info('Timeout for %s test: %s', test_name, timeout)
+    full_cmd = cmd
+    if self._options.dry_run:
+      full_cmd = 'echo %s' % cmd
+
+    logfile = sys.stdout
+    archive_bytes = None
+    if self._options.single_step:
+      # Just print a heartbeat so that the outer buildbot scripts won't time
+      # out waiting for output.
+      logfile = _HeartBeatLogger()
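+    # Commands that start with 'src/' expect to run from the directory above
+    # the checkout; everything else runs from the source root itself.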
+    cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT)
+    if full_cmd.startswith('src/'):
+      cwd = os.path.abspath(os.path.join(host_paths.DIR_SOURCE_ROOT, os.pardir))
+    try:
+      exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
+          full_cmd, timeout, cwd=cwd, shell=True, logfile=logfile)
+      json_output = self._ReadChartjsonOutput()
+      if test_config.get('archive_output_dir'):
+        archive_bytes = self._ArchiveOutputDir()
+    except cmd_helper.TimeoutError as e:
+      exit_code = -1
+      output = e.output
+      json_output = ''
+    finally:
+      self._CleanupOutputDirectory()
+      if self._options.single_step:
+        logfile.stop()
+    end_time = time.time()
+    if exit_code is None:
+      exit_code = -1
+    logging.info('%s : exit_code=%d in %d secs at %s',
+                 test_name, exit_code, end_time - start_time,
+                 self.device_serial)
+
+    if exit_code == 0:
+      result_type = base_test_result.ResultType.PASS
+    else:
+      result_type = base_test_result.ResultType.FAIL
+      # Since perf tests use device affinity, give the device a chance to
+      # recover if it is offline after a failure. Otherwise, the master sharder
+      # will remove it from the pool and future tests on this device will fail.
+      try:
+        self.device.WaitUntilFullyBooted(timeout=120)
+      except device_errors.CommandTimeoutError as e:
+        logging.error('Device failed to return after %s: %s', test_name, e)
+
+    actual_exit_code = exit_code
+    if test_name in self._flaky_tests:
+      # The exit_code is used at the second stage when printing the
+      # test output. If the test is flaky, force to "0" to get that step green
+      # whilst still gathering data to the perf dashboards.
+      # The result_type is used by the test_dispatcher to retry the test.
+      exit_code = 0
+
+    persisted_result = {
+        'name': test_name,
+        'output': [output],
+        'chartjson': json_output,
+        'archive_bytes': archive_bytes,
+        'exit_code': exit_code,
+        'actual_exit_code': actual_exit_code,
+        'result_type': result_type,
+        'start_time': start_time,
+        'end_time': end_time,
+        'total_time': end_time - start_time,
+        'device': self.device_serial,
+        'cmd': cmd,
+    }
+    self._SaveResult(persisted_result)
+
+    return (output, result_type)
+
+  def RunTest(self, test_name):
+    """Run a perf test on the device.
+
+    Args:
+      test_name: String to use for logging the test result.
+
+    Returns:
+      A tuple of (TestRunResults, retry).
+    """
+    _, result_type = self._LaunchPerfTest(test_name)
+    results = base_test_result.TestRunResults()
+    results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
+    retry = None
+    if not results.DidRunPass():
+      retry = test_name
+    return results, retry
diff --git a/build/android/pylib/perf/thermal_throttle.py b/build/android/pylib/perf/thermal_throttle.py
new file mode 100644
index 0000000..0473da8
--- /dev/null
+++ b/build/android/pylib/perf/thermal_throttle.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.perf.thermal_throttle import *
diff --git a/build/android/pylib/pexpect.py b/build/android/pylib/pexpect.py
new file mode 100644
index 0000000..cf59fb0
--- /dev/null
+++ b/build/android/pylib/pexpect.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from __future__ import absolute_import
+
+import os
+import sys
+
+_CHROME_SRC = os.path.join(
+    os.path.abspath(os.path.dirname(__file__)), '..', '..', '..')
+
+_PEXPECT_PATH = os.path.join(_CHROME_SRC, 'third_party', 'pexpect')
+if _PEXPECT_PATH not in sys.path:
+  sys.path.append(_PEXPECT_PATH)
+
+# pexpect is not available on all platforms. We allow this file to be imported
+# on platforms without pexpect and only fail when pexpect is actually used.
+try:
+  from pexpect import * # pylint: disable=W0401,W0614
+except ImportError:
+  pass
diff --git a/build/android/pylib/remote/__init__.py b/build/android/pylib/remote/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/build/android/pylib/remote/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/pylib/remote/device/__init__.py b/build/android/pylib/remote/device/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/build/android/pylib/remote/device/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/pylib/remote/device/appurify_constants.py b/build/android/pylib/remote/device/appurify_constants.py
new file mode 100644
index 0000000..cf99bb6
--- /dev/null
+++ b/build/android/pylib/remote/device/appurify_constants.py
@@ -0,0 +1,58 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Defines a set of constants specific to appurify."""
+
+# Appurify network config constants.
+class NETWORK(object):
+  WIFI_1_BAR = 1
+  SPRINT_4G_LTE_4_BARS = 2
+  SPRINT_3G_5_BARS = 3
+  SPRINT_3G_4_BARS = 4
+  SPRINT_3G_3_BARS = 5
+  SPRINT_3G_2_BARS = 6
+  SPRINT_3G_1_BAR = 7
+  SPRING_4G_1_BAR = 8
+  VERIZON_3G_5_BARS = 9
+  VERIZON_3G_4_BARS = 10
+  VERIZON_3G_3_BARS = 11
+  VERIZON_3G_2_BARS = 12
+  VERIZON_3G_1_BAR = 13
+  VERIZON_4G_1_BAR = 14
+  ATANDT_3G_5_BARS = 15
+  ATANDT_3G_4_BARS = 16
+  ATANDT_3G_3_BARS = 17
+  ATANDT_3G_2_BARS = 18
+  ATANDT_3G_1_BAR = 19
+  GENERIC_2G_4_BARS = 20
+  GENERIC_2G_3_BARS = 21
+  GENERIC_EVOLVED_EDGE = 22
+  GENERIC_GPRS = 23
+  GENERIC_ENHANCED_GPRS = 24
+  GENERIC_LTE = 25
+  GENERIC_HIGH_LATENCY_DNS = 26
+  GENERIC_100_PERCENT_PACKET_LOSS = 27
+  ATANDT_HSPA_PLUS = 28
+  ATANDT_4G_LTE_4_BARS = 29
+  VERIZON_4G_LTE_4_BARS = 30
+  GENERIC_DIGITAL_SUBSCRIBE_LINE = 31
+  WIFI_STARBUCKS_3_BARS = 32
+  WIFI_STARBUCKS_4_BARS = 33
+  WIFI_STARBUCKS_HIGH_TRAFFIC = 34
+  WIFI_TARGET_1_BAR = 35
+  WIFI_TARGET_3_BARS = 36
+  WIFI_TARGET_4_BARS = 37
+  PUBLIC_WIFI_MCDONALDS_5_BARS = 38
+  PUBLIC_WIFI_MCDONALDS_4_BARS = 39
+  PUBLIC_WIFI_MCDONALDS_2_BARS = 40
+  PUBLIC_WIFI_MCDONALDS_1_BAR = 41
+  PUBLIC_WIFI_KOHLS_5_BARS = 42
+  PUBLIC_WIFI_KOHLS_4_BARS = 43
+  PUBLIC_WIFI_KOHLS_2_BARS = 44
+  PUBLIC_WIFI_ATANDT_5_BARS = 45
+  PUBLIC_WIFI_ATANDT_4_BARS = 46
+  PUBLIC_WIFI_ATANDT_2_BARS = 47
+  PUBLIC_WIFI_ATANDT_1_BAR = 48
+  BOINGO = 49
+
diff --git a/build/android/pylib/remote/device/appurify_sanitized.py b/build/android/pylib/remote/device/appurify_sanitized.py
new file mode 100644
index 0000000..48736d5
--- /dev/null
+++ b/build/android/pylib/remote/device/appurify_sanitized.py
@@ -0,0 +1,43 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import logging
+import os
+
+from pylib.constants import host_paths
+
+_REQUESTS_PATH = os.path.join(
+    host_paths.DIR_SOURCE_ROOT, 'third_party', 'requests', 'src')
+_APPURIFY_PYTHON_PATH = os.path.join(
+    host_paths.DIR_SOURCE_ROOT, 'third_party', 'appurify-python', 'src')
+
+with host_paths.SysPath(_REQUESTS_PATH), (
+     host_paths.SysPath(_APPURIFY_PYTHON_PATH)):
+
+  handlers_before = list(logging.getLogger().handlers)
+
+  import appurify.api # pylint: disable=import-error
+  import appurify.utils # pylint: disable=import-error
+
+  handlers_after = list(logging.getLogger().handlers)
+  new_handler = list(set(handlers_after) - set(handlers_before))
+  while new_handler:
+    logging.info("Removing logging handler.")
+    logging.getLogger().removeHandler(new_handler.pop())
+
+  api = appurify.api
+  utils = appurify.utils
+
+# This is not thread safe. If multiple threads are ever supported with appurify
+# this may cause logging messages to go missing.
+@contextlib.contextmanager
+def SanitizeLogging(verbose_count, level):
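+  """Temporarily disables logging messages at or below |level|.
+
+  Yields True if logging was disabled (i.e. verbose_count < 2), else False.
+  """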
+  if verbose_count < 2:
+    logging.disable(level)
+    yield True
+    logging.disable(logging.NOTSET)
+  else:
+    yield False
+
diff --git a/build/android/pylib/remote/device/dummy/BUILD.gn b/build/android/pylib/remote/device/dummy/BUILD.gn
new file mode 100644
index 0000000..54ca275
--- /dev/null
+++ b/build/android/pylib/remote/device/dummy/BUILD.gn
@@ -0,0 +1,14 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/android/config.gni")
+import("//build/config/android/rules.gni")
+
+# GYP: //build/android/pylib/remote/device/dummy/dummy.gyp:remote_device_dummy_apk
+android_apk("remote_device_dummy_apk") {
+  android_manifest = "//build/android/AndroidManifest.xml"
+  java_files = [ "src/org/chromium/dummy/Dummy.java" ]
+  apk_name = "remote_device_dummy"
+  testonly = true
+}
diff --git a/build/android/pylib/remote/device/dummy/dummy.gyp b/build/android/pylib/remote/device/dummy/dummy.gyp
new file mode 100644
index 0000000..a7c451f
--- /dev/null
+++ b/build/android/pylib/remote/device/dummy/dummy.gyp
@@ -0,0 +1,48 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Running gtests on a remote device via am instrument requires both an "app"
+# APK and a "test" APK with different package names. Our gtests only use one
+# APK, so we build a dummy APK to upload as the app.
+
+{
+  'variables': {
+    'remote_device_dummy_apk_name': 'remote_device_dummy',
+    'remote_device_dummy_apk_path': '<(PRODUCT_DIR)/apks/<(remote_device_dummy_apk_name).apk',
+  },
+  'targets': [
+    {
+      # GN: //build/android/pylib/remote/device/dummy:remote_device_dummy_apk
+      'target_name': 'remote_device_dummy_apk',
+      'type': 'none',
+      'variables': {
+        'apk_name': '<(remote_device_dummy_apk_name)',
+        'final_apk_path': '<(remote_device_dummy_apk_path)',
+        'java_in_dir': '.',
+        'never_lint': 1,
+        'android_manifest_path': '../../../../../../build/android/AndroidManifest.xml',
+      },
+      'includes': [
+        '../../../../../../build/java_apk.gypi',
+      ]
+    },
+    {
+      'target_name': 'require_remote_device_dummy_apk',
+      'message': 'Making sure <(remote_device_dummy_apk_path) has been built.',
+      'type': 'none',
+      'variables': {
+        'required_file': '<(PRODUCT_DIR)/remote_device_dummy_apk/<(remote_device_dummy_apk_name).apk.required',
+      },
+      'inputs': [
+        '<(remote_device_dummy_apk_path)',
+      ],
+      'outputs': [
+        '<(required_file)',
+      ],
+      'action': [
+        'python', '../../build/android/gyp/touch.py', '<(required_file)',
+      ],
+    }
+  ]
+}
diff --git a/build/android/pylib/remote/device/dummy/src/org/chromium/dummy/Dummy.java b/build/android/pylib/remote/device/dummy/src/org/chromium/dummy/Dummy.java
new file mode 100644
index 0000000..1281b39
--- /dev/null
+++ b/build/android/pylib/remote/device/dummy/src/org/chromium/dummy/Dummy.java
@@ -0,0 +1,9 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.dummy;
+
+/** Does nothing. */
+class Dummy {}
+
diff --git a/build/android/pylib/remote/device/remote_device_environment.py b/build/android/pylib/remote/device/remote_device_environment.py
new file mode 100644
index 0000000..7923f3a
--- /dev/null
+++ b/build/android/pylib/remote/device/remote_device_environment.py
@@ -0,0 +1,364 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Environment setup and teardown for remote devices."""
+
+import distutils.version
+import json
+import logging
+import os
+import random
+import sys
+
+from devil.utils import reraiser_thread
+from devil.utils import timeout_retry
+from pylib.base import environment
+from pylib.remote.device import appurify_sanitized
+from pylib.remote.device import remote_device_helper
+
+class RemoteDeviceEnvironment(environment.Environment):
+  """An environment for running on remote devices."""
+
+  _ENV_KEY = 'env'
+  _DEVICE_KEY = 'device'
+  _DEFAULT_RETRIES = 0
+
+  def __init__(self, args, error_func):
+    """Constructor.
+
+    Args:
+      args: Command line arguments.
+      error_func: Function to call to report bad command-line arguments.
+    """
+    super(RemoteDeviceEnvironment, self).__init__()
+    self._access_token = None
+    self._device = None
+    self._device_type = args.device_type
+    self._verbose_count = args.verbose_count
+    self._timeouts = {
+        'queueing': 60 * 10,
+        'installing': 60 * 10,
+        'in-progress': 60 * 30,
+        'unknown': 60 * 5
+    }
+    # Example config file:
+    # {
+    #   "remote_device": ["Galaxy S4", "Galaxy S3"],
+    #   "remote_device_os": ["4.4.2", "4.4.4"],
+    #   "remote_device_minimum_os": "4.4.2",
+    #   "api_address": "www.example.com",
+    #   "api_port": "80",
+    #   "api_protocol": "http",
+    #   "api_secret": "apisecret",
+    #   "api_key": "apikey",
+    #   "timeouts": {
+    #     "queueing": 600,
+    #     "installing": 600,
+    #     "in-progress": 1800,
+    #     "unknown": 300
+    #   }
+    # }
+    if args.remote_device_file:
+      with open(args.remote_device_file) as device_file:
+        device_json = json.load(device_file)
+    else:
+      device_json = {}
+
+    self._api_address = device_json.get('api_address', None)
+    self._api_key = device_json.get('api_key', None)
+    self._api_port = device_json.get('api_port', None)
+    self._api_protocol = device_json.get('api_protocol', None)
+    self._api_secret = device_json.get('api_secret', None)
+    self._device_oem = device_json.get('device_oem', None)
+    self._device_type = device_json.get('device_type', 'Android')
+    self._network_config = device_json.get('network_config', None)
+    self._remote_device = device_json.get('remote_device', None)
+    self._remote_device_minimum_os = device_json.get(
+        'remote_device_minimum_os', None)
+    self._remote_device_os = device_json.get('remote_device_os', None)
+    self._remote_device_timeout = device_json.get(
+        'remote_device_timeout', None)
+    self._results_path = device_json.get('results_path', None)
+    self._runner_package = device_json.get('runner_package', None)
+    self._runner_type = device_json.get('runner_type', None)
+    self._timeouts.update(device_json.get('timeouts', {}))
+
+    def command_line_override(
+        file_value, cmd_line_value, desc, print_value=True):
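+      """Returns the command-line value if set, else the device-file value."""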
+      if cmd_line_value:
+        if file_value and file_value != cmd_line_value:
+          if print_value:
+            logging.info('Overriding %s from %s to %s',
+                         desc, file_value, cmd_line_value)
+          else:
+            logging.info('overriding %s', desc)
+        return cmd_line_value
+      return file_value
+
+    self._api_address = command_line_override(
+        self._api_address, args.api_address, 'api_address')
+    self._api_port = command_line_override(
+        self._api_port, args.api_port, 'api_port')
+    self._api_protocol = command_line_override(
+        self._api_protocol, args.api_protocol, 'api_protocol')
+    self._device_oem = command_line_override(
+        self._device_oem, args.device_oem, 'device_oem')
+    self._device_type = command_line_override(
+        self._device_type, args.device_type, 'device_type')
+    self._network_config = command_line_override(
+        self._network_config, args.network_config, 'network_config')
+    self._remote_device = command_line_override(
+        self._remote_device, args.remote_device, 'remote_device')
+    self._remote_device_minimum_os = command_line_override(
+        self._remote_device_minimum_os, args.remote_device_minimum_os,
+        'remote_device_minimum_os')
+    self._remote_device_os = command_line_override(
+        self._remote_device_os, args.remote_device_os, 'remote_device_os')
+    self._remote_device_timeout = command_line_override(
+        self._remote_device_timeout, args.remote_device_timeout,
+        'remote_device_timeout')
+    self._results_path = command_line_override(
+        self._results_path, args.results_path, 'results_path')
+    self._runner_package = command_line_override(
+        self._runner_package, args.runner_package, 'runner_package')
+    self._runner_type = command_line_override(
+        self._runner_type, args.runner_type, 'runner_type')
+    self._timeouts["in-progress"] = command_line_override(
+        self._timeouts["in-progress"], args.test_timeout, 'test_timeout')
+
+    if args.api_key_file:
+      with open(args.api_key_file) as api_key_file:
+        temp_key = api_key_file.read().strip()
+        self._api_key = command_line_override(
+            self._api_key, temp_key, 'api_key', print_value=False)
+    self._api_key = command_line_override(
+        self._api_key, args.api_key, 'api_key', print_value=False)
+
+    if args.api_secret_file:
+      with open(args.api_secret_file) as api_secret_file:
+        temp_secret = api_secret_file.read().strip()
+        self._api_secret = command_line_override(
+            self._api_secret, temp_secret, 'api_secret', print_value=False)
+    self._api_secret = command_line_override(
+        self._api_secret, args.api_secret, 'api_secret', print_value=False)
+
+    if not self._api_address:
+      error_func('Must set api address with --api-address'
+                 ' or in --remote-device-file.')
+    if not self._api_key:
+      error_func('Must set api key with --api-key, --api-key-file'
+                 ' or in --remote-device-file')
+    if not self._api_port:
+      error_func('Must set api port with --api-port'
+                 ' or in --remote-device-file')
+    if not self._api_protocol:
+      error_func('Must set api protocol with --api-protocol'
+                 ' or in --remote-device-file. Example: http')
+    if not self._api_secret:
+      error_func('Must set api secret with --api-secret, --api-secret-file'
+                 ' or in --remote-device-file')
+
+    logging.info('Api address: %s', self._api_address)
+    logging.info('Api port: %s', self._api_port)
+    logging.info('Api protocol: %s', self._api_protocol)
+    logging.info('Remote device: %s', self._remote_device)
+    logging.info('Remote device minimum OS: %s',
+                 self._remote_device_minimum_os)
+    logging.info('Remote device OS: %s', self._remote_device_os)
+    logging.info('Remote device OEM: %s', self._device_oem)
+    logging.info('Remote device type: %s', self._device_type)
+    logging.info('Remote device timeout: %s', self._remote_device_timeout)
+    logging.info('Results Path: %s', self._results_path)
+    logging.info('Runner package: %s', self._runner_package)
+    logging.info('Runner type: %s', self._runner_type)
+    logging.info('Timeouts: %s', self._timeouts)
+
+    if not args.trigger and not args.collect:
+      self._trigger = True
+      self._collect = True
+    else:
+      self._trigger = args.trigger
+      self._collect = args.collect
+
+  def SetUp(self):
+    """Set up the test environment."""
+    os.environ['APPURIFY_API_PROTO'] = self._api_protocol
+    os.environ['APPURIFY_API_HOST'] = self._api_address
+    os.environ['APPURIFY_API_PORT'] = self._api_port
+    os.environ['APPURIFY_STATUS_BASE_URL'] = 'none'
+    self._GetAccessToken()
+    if self._trigger:
+      self._SelectDevice()
+
+  def TearDown(self):
+    """Teardown the test environment."""
+    self._RevokeAccessToken()
+
+  def __enter__(self):
+    """Set up the test run when used as a context manager."""
+    try:
+      self.SetUp()
+      return self
+    except:
+      self.__exit__(*sys.exc_info())
+      raise
+
+  def __exit__(self, exc_type, exc_val, exc_tb):
+    """Tears down the test run when used as a context manager."""
+    self.TearDown()
+
+  def DumpTo(self, persisted_data):
+    env_data = {
+      self._DEVICE_KEY: self._device,
+    }
+    persisted_data[self._ENV_KEY] = env_data
+
+  def LoadFrom(self, persisted_data):
+    env_data = persisted_data[self._ENV_KEY]
+    self._device = env_data[self._DEVICE_KEY]
+
+  def _GetAccessToken(self):
+    """Generates access token for remote device service."""
+    logging.info('Generating remote service access token')
+    with appurify_sanitized.SanitizeLogging(self._verbose_count,
+                                            logging.WARNING):
+      access_token_results = appurify_sanitized.api.access_token_generate(
+          self._api_key, self._api_secret)
+    remote_device_helper.TestHttpResponse(access_token_results,
+                                          'Unable to generate access token.')
+    self._access_token = access_token_results.json()['response']['access_token']
+
+  def _RevokeAccessToken(self):
+    """Destroys access token for remote device service."""
+    logging.info('Revoking remote service access token')
+    with appurify_sanitized.SanitizeLogging(self._verbose_count,
+                                            logging.WARNING):
+      revoke_token_results = appurify_sanitized.api.access_token_revoke(
+          self._access_token)
+    remote_device_helper.TestHttpResponse(revoke_token_results,
+                                          'Unable to revoke access token.')
+
+  def _SelectDevice(self):
+    if self._remote_device_timeout:
+      try:
+        timeout_retry.Run(self._FindDeviceWithTimeout,
+                          self._remote_device_timeout, self._DEFAULT_RETRIES)
+      except reraiser_thread.TimeoutError:
+        self._NoDeviceFound()
+    else:
+      if not self._FindDevice():
+        self._NoDeviceFound()
+
+  def _FindDevice(self):
+    """Find which device to use."""
+    logging.info('Finding device to run tests on.')
+    device_list = self._GetDeviceList()
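+    # Shuffle so repeated runs don't always try the same matching device
+    # first.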
+    random.shuffle(device_list)
+    for device in device_list:
+      if device['os_name'] != self._device_type:
+        continue
+      if self._remote_device and device['name'] not in self._remote_device:
+        continue
+      if (self._remote_device_os
+          and device['os_version'] not in self._remote_device_os):
+        continue
+      if self._device_oem and device['brand'] not in self._device_oem:
+        continue
+      if (self._remote_device_minimum_os
+          and distutils.version.LooseVersion(device['os_version'])
+          < distutils.version.LooseVersion(self._remote_device_minimum_os)):
+        continue
+      if device['has_available_device']:
+        logging.info('Found device: %s %s',
+                     device['name'], device['os_version'])
+        self._device = device
+        return True
+    return False
+
+  def _FindDeviceWithTimeout(self):
+    """Find which device to use with timeout."""
+    timeout_retry.WaitFor(self._FindDevice, wait_period=1)
+
+  def _PrintAvailableDevices(self, device_list):
+    def compare_devices(a, b):
+      for key in ('os_version', 'name'):
+        c = cmp(a[key], b[key])
+        if c:
+          return c
+      return 0
+
+    logging.critical('Available %s Devices:', self._device_type)
+    logging.critical(
+        '  %s %s %s %s %s',
+        'OS'.ljust(10),
+        'Device Name'.ljust(30),
+        'Available'.ljust(10),
+        'Busy'.ljust(10),
+        'All'.ljust(10))
+    devices = (d for d in device_list if d['os_name'] == self._device_type)
+    for d in sorted(devices, compare_devices):
+      logging.critical(
+          '  %s %s %s %s %s',
+          d['os_version'].ljust(10),
+          d['name'].ljust(30),
+          str(d['available_devices_count']).ljust(10),
+          str(d['busy_devices_count']).ljust(10),
+          str(d['all_devices_count']).ljust(10))
+
+  def _GetDeviceList(self):
+    with appurify_sanitized.SanitizeLogging(self._verbose_count,
+                                            logging.WARNING):
+      dev_list_res = appurify_sanitized.api.devices_list(self._access_token)
+    remote_device_helper.TestHttpResponse(dev_list_res,
+                                          'Unable to get device list.')
+    return dev_list_res.json()['response']
+
+  def _NoDeviceFound(self):
+    self._PrintAvailableDevices(self._GetDeviceList())
+    raise remote_device_helper.RemoteDeviceError(
+        'No device found.', is_infra_error=True)
+
+  @property
+  def collect(self):
+    return self._collect
+
+  @property
+  def device_type_id(self):
+    return self._device['device_type_id']
+
+  @property
+  def network_config(self):
+    return self._network_config
+
+  @property
+  def results_path(self):
+    return self._results_path
+
+  @property
+  def runner_package(self):
+    return self._runner_package
+
+  @property
+  def runner_type(self):
+    return self._runner_type
+
+  @property
+  def timeouts(self):
+    return self._timeouts
+
+  @property
+  def token(self):
+    return self._access_token
+
+  @property
+  def trigger(self):
+    return self._trigger
+
+  @property
+  def verbose_count(self):
+    return self._verbose_count
+
+  @property
+  def device_type(self):
+    return self._device_type
diff --git a/build/android/pylib/remote/device/remote_device_gtest_run.py b/build/android/pylib/remote/device/remote_device_gtest_run.py
new file mode 100644
index 0000000..0cfe717
--- /dev/null
+++ b/build/android/pylib/remote/device/remote_device_gtest_run.py
@@ -0,0 +1,89 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Run specific test on specific environment."""
+
+import logging
+import os
+import tempfile
+
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.gtest import gtest_test_instance
+from pylib.remote.device import remote_device_test_run
+
+
+_EXTRA_COMMAND_LINE_FILE = (
+    'org.chromium.native_test.NativeTestActivity.CommandLineFile')
+
+
+class RemoteDeviceGtestTestRun(remote_device_test_run.RemoteDeviceTestRun):
+  """Run gtests and uirobot tests on a remote device."""
+
+  DEFAULT_RUNNER_PACKAGE = (
+      'org.chromium.native_test.NativeTestInstrumentationTestRunner')
+
+  #override
+  def TestPackage(self):
+    return self._test_instance.suite
+
+  #override
+  def _TriggerSetUp(self):
+    """Set up the triggering of a test run."""
+    logging.info('Triggering test run.')
+
+    if self._env.runner_type:
+      logging.warning('Ignoring configured runner_type "%s"',
+                      self._env.runner_type)
+
+    if not self._env.runner_package:
+      runner_package = self.DEFAULT_RUNNER_PACKAGE
+      logging.info('Using default runner package: %s',
+                   self.DEFAULT_RUNNER_PACKAGE)
+    else:
+      runner_package = self._env.runner_package
+
+    dummy_app_path = os.path.join(
+        constants.GetOutDirectory(), 'apks', 'remote_device_dummy.apk')
+
+    # pylint: disable=protected-access
+    with tempfile.NamedTemporaryFile(suffix='.flags.txt') as flag_file:
+      env_vars = dict(self._test_instance.extras)
+      if gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT not in env_vars:
+        env_vars[gtest_test_instance.EXTRA_SHARD_NANO_TIMEOUT] = int(
+            self._test_instance.shard_timeout * 1e9)
+
+      flags = []
+
+      filter_string = self._test_instance._GenerateDisabledFilterString(None)
+      if filter_string:
+        flags.append('--gtest_filter=%s' % filter_string)
+
+      if self._test_instance.test_arguments:
+        flags.append(self._test_instance.test_arguments)
+
+      if flags:
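+        # The first token of a Chromium command-line file is treated as the
+        # program name and ignored, hence the leading '_'.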
+        flag_file.write('_ ' + ' '.join(flags))
+        flag_file.flush()
+        env_vars[_EXTRA_COMMAND_LINE_FILE] = os.path.basename(flag_file.name)
+        self._test_instance._data_deps.append(
+            (os.path.abspath(flag_file.name), None))
+      self._AmInstrumentTestSetup(
+          dummy_app_path, self._test_instance.apk, runner_package,
+          environment_variables=env_vars)
+
+  _INSTRUMENTATION_STREAM_LEADER = 'INSTRUMENTATION_STATUS: stream='
+
+  #override
+  def _ParseTestResults(self):
+    logging.info('Parsing results from stdout.')
+    results = base_test_result.TestRunResults()
+    output = self._results['results']['output'].splitlines()
+    output = (l[len(self._INSTRUMENTATION_STREAM_LEADER):] for l in output
+              if l.startswith(self._INSTRUMENTATION_STREAM_LEADER))
+    results_list = self._test_instance.ParseGTestOutput(output)
+    results.AddResults(results_list)
+
+    self._DetectPlatformErrors(results)
+    return results
diff --git a/build/android/pylib/remote/device/remote_device_helper.py b/build/android/pylib/remote/device/remote_device_helper.py
new file mode 100644
index 0000000..1b02207
--- /dev/null
+++ b/build/android/pylib/remote/device/remote_device_helper.py
@@ -0,0 +1,24 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common functions and Exceptions for remote_device_*"""
+
+from devil import base_error
+
+
+class RemoteDeviceError(base_error.BaseError):
+  """Exception to throw when problems occur with remote device service."""
+  pass
+
+
+def TestHttpResponse(response, error_msg):
+  """Checks the Http response from remote device service.
+
+  Args:
+      response: HTTP response object from the remote device service.
+      error_msg: Error message to display if bad response is seen.
+  """
+  if response.status_code != 200:
+    raise RemoteDeviceError(
+        '%s (%d: %s)' % (error_msg, response.status_code, response.reason))
diff --git a/build/android/pylib/remote/device/remote_device_instrumentation_test_run.py b/build/android/pylib/remote/device/remote_device_instrumentation_test_run.py
new file mode 100644
index 0000000..ee01857
--- /dev/null
+++ b/build/android/pylib/remote/device/remote_device_instrumentation_test_run.py
@@ -0,0 +1,74 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Run specific test on specific environment."""
+
+import logging
+import os
+import tempfile
+
+from pylib.base import base_test_result
+from pylib.remote.device import remote_device_test_run
+
+
+class RemoteDeviceInstrumentationTestRun(
+    remote_device_test_run.RemoteDeviceTestRun):
+  """Run instrumentation tests on a remote device."""
+
+  #override
+  def TestPackage(self):
+    return self._test_instance.test_package
+
+  #override
+  def _TriggerSetUp(self):
+    """Set up the triggering of a test run."""
+    logging.info('Triggering test run.')
+
+    # pylint: disable=protected-access
+    with tempfile.NamedTemporaryFile(suffix='.txt') as test_list_file:
+      tests = self._test_instance.GetTests()
+      logging.debug('preparing to run %d instrumentation tests remotely:',
+                    len(tests))
+      for t in tests:
+        test_name = '%s#%s' % (t['class'], t['method'])
+        logging.debug('  %s', test_name)
+        test_list_file.write('%s\n' % test_name)
+      test_list_file.flush()
+      self._test_instance._data_deps.append(
+          (os.path.abspath(test_list_file.name), None))
+
+      env_vars = self._test_instance.GetDriverEnvironmentVars(
+          test_list_file_path=test_list_file.name)
+
+      logging.debug('extras:')
+      for k, v in env_vars.iteritems():
+        logging.debug('  %s: %s', k, v)
+
+      self._AmInstrumentTestSetup(
+          self._test_instance.apk_under_test,
+          self._test_instance.driver_apk,
+          self._test_instance.driver_name,
+          environment_variables=env_vars,
+          extra_apks=([self._test_instance.test_apk] +
+                      self._test_instance.additional_apks))
+
+  #override
+  def _ParseTestResults(self):
+    logging.info('Parsing results from stdout.')
+    r = base_test_result.TestRunResults()
+    result_code, result_bundle, statuses = (
+        self._test_instance.ParseAmInstrumentRawOutput(
+            self._results['results']['output'].splitlines()))
+    result = self._test_instance.GenerateTestResults(
+        result_code, result_bundle, statuses, 0, 0)
+
+    if isinstance(result, base_test_result.BaseTestResult):
+      r.AddResult(result)
+    elif isinstance(result, list):
+      r.AddResults(result)
+    else:
+      raise Exception('Unexpected result type: %s' % type(result).__name__)
+
+    self._DetectPlatformErrors(r)
+    return r
diff --git a/build/android/pylib/remote/device/remote_device_test_run.py b/build/android/pylib/remote/device/remote_device_test_run.py
new file mode 100644
index 0000000..ec29b55
--- /dev/null
+++ b/build/android/pylib/remote/device/remote_device_test_run.py
@@ -0,0 +1,390 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Run specific test on specific environment."""
+
+import json
+import logging
+import os
+import re
+import shutil
+import string
+import tempfile
+import time
+import zipfile
+
+from devil.utils import zip_utils
+from pylib.base import base_test_result
+from pylib.base import test_run
+from pylib.remote.device import appurify_constants
+from pylib.remote.device import appurify_sanitized
+from pylib.remote.device import remote_device_helper
+
+_DEVICE_OFFLINE_RE = re.compile('error: device not found')
+_LONG_MSG_RE = re.compile('longMsg=(.*)$')
+_SHORT_MSG_RE = re.compile('shortMsg=(.*)$')
+
+class RemoteDeviceTestRun(test_run.TestRun):
+  """Run tests on a remote device."""
+
+  _TEST_RUN_KEY = 'test_run'
+  _TEST_RUN_ID_KEY = 'test_run_id'
+
+  WAIT_TIME = 5
+  COMPLETE = 'complete'
+  HEARTBEAT_INTERVAL = 300
+
+  def __init__(self, env, test_instance):
+    """Constructor.
+
+    Args:
+      env: Environment the tests will run in.
+      test_instance: The test that will be run.
+    """
+    super(RemoteDeviceTestRun, self).__init__(env, test_instance)
+    self._env = env
+    self._test_instance = test_instance
+    self._app_id = ''
+    self._test_id = ''
+    self._results = ''
+    self._test_run_id = ''
+    self._results_temp_dir = None
+
+  #override
+  def SetUp(self):
+    """Set up a test run."""
+    if self._env.trigger:
+      self._TriggerSetUp()
+    elif self._env.collect:
+      assert isinstance(self._env.collect, basestring), (
+                        'File for storing test_run_id must be a string.')
+      with open(self._env.collect, 'r') as persisted_data_file:
+        persisted_data = json.loads(persisted_data_file.read())
+        self._env.LoadFrom(persisted_data)
+        self.LoadFrom(persisted_data)
+
+  def _TriggerSetUp(self):
+    """Set up the triggering of a test run."""
+    raise NotImplementedError
+
+  #override
+  def RunTests(self):
+    """Run the test."""
+    if self._env.trigger:
+      with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
+                                              logging.WARNING):
+        test_start_res = appurify_sanitized.api.tests_run(
+            self._env.token, self._env.device_type_id, self._app_id,
+            self._test_id)
+      remote_device_helper.TestHttpResponse(
+        test_start_res, 'Unable to run test.')
+      self._test_run_id = test_start_res.json()['response']['test_run_id']
+      logging.info('Test run id: %s', self._test_run_id)
+
+    if self._env.collect:
+      current_status = ''
+      timeout_counter = 0
+      heartbeat_counter = 0
+      while self._GetTestStatus(self._test_run_id) != self.COMPLETE:
+        if self._results['detailed_status'] != current_status:
+          logging.info('Test status: %s', self._results['detailed_status'])
+          current_status = self._results['detailed_status']
+          timeout_counter = 0
+          heartbeat_counter = 0
+        if heartbeat_counter > self.HEARTBEAT_INTERVAL:
+          logging.info('Test status: %s', self._results['detailed_status'])
+          heartbeat_counter = 0
+
+        timeout = self._env.timeouts.get(
+            current_status, self._env.timeouts['unknown'])
+        if timeout_counter > timeout:
+          raise remote_device_helper.RemoteDeviceError(
+              'Timeout while in %s state for %s seconds'
+              % (current_status, timeout),
+              is_infra_error=True)
+        time.sleep(self.WAIT_TIME)
+        timeout_counter += self.WAIT_TIME
+        heartbeat_counter += self.WAIT_TIME
+      self._DownloadTestResults(self._env.results_path)
+
+      if self._results['results']['exception']:
+        raise remote_device_helper.RemoteDeviceError(
+            self._results['results']['exception'], is_infra_error=True)
+
+      return self._ParseTestResults()
+
+  #override
+  def TearDown(self):
+    """Tear down the test run."""
+    if self._env.collect:
+      self._CollectTearDown()
+    elif self._env.trigger:
+      assert isinstance(self._env.trigger, basestring), (
+                        'File for storing test_run_id must be a string.')
+      with open(self._env.trigger, 'w') as persisted_data_file:
+        persisted_data = {}
+        self.DumpTo(persisted_data)
+        self._env.DumpTo(persisted_data)
+        persisted_data_file.write(json.dumps(persisted_data))
+
+  def _CollectTearDown(self):
+    if self._GetTestStatus(self._test_run_id) != self.COMPLETE:
+      with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
+                                              logging.WARNING):
+        test_abort_res = appurify_sanitized.api.tests_abort(
+            self._env.token, self._test_run_id, reason='Test runner exiting.')
+      remote_device_helper.TestHttpResponse(test_abort_res,
+                                            'Unable to abort test.')
+    if self._results_temp_dir:
+      shutil.rmtree(self._results_temp_dir)
+
+  def __enter__(self):
+    """Set up the test run when used as a context manager."""
+    self.SetUp()
+    return self
+
+  def __exit__(self, exc_type, exc_val, exc_tb):
+    """Tear down the test run when used as a context manager."""
+    self.TearDown()
+
+  def DumpTo(self, persisted_data):
+    test_run_data = {
+      self._TEST_RUN_ID_KEY: self._test_run_id,
+    }
+    persisted_data[self._TEST_RUN_KEY] = test_run_data
+
+  def LoadFrom(self, persisted_data):
+    test_run_data = persisted_data[self._TEST_RUN_KEY]
+    self._test_run_id = test_run_data[self._TEST_RUN_ID_KEY]
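+
+  # A sketch of the persisted JSON handed from a --trigger invocation to a
+  # later --collect invocation (the test_run_id value is illustrative; the
+  # environment adds its own keys via its DumpTo):
+  #
+  #   {
+  #     "test_run": {"test_run_id": "12345"}
+  #   }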
+
+  def _ParseTestResults(self):
+    raise NotImplementedError
+
+  def _GetTestByName(self, test_name):
+    """Gets test_id for specific test.
+
+    Args:
+      test_name: Test to find the ID of.
+    """
+    with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
+                                            logging.WARNING):
+      test_list_res = appurify_sanitized.api.tests_list(self._env.token)
+    remote_device_helper.TestHttpResponse(test_list_res,
+                                          'Unable to get tests list.')
+    for test in test_list_res.json()['response']:
+      if test['test_type'] == test_name:
+        return test['test_id']
+    raise remote_device_helper.RemoteDeviceError(
+        'No test found with name %s' % (test_name))
+
+  def _DownloadTestResults(self, results_path):
+    """Download the test results from remote device service.
+
+    Downloads results in temporary location, and then copys results
+    to results_path if results_path is not set to None.
+
+    Args:
+      results_path: Path to download appurify results zipfile.
+
+    Returns:
+      Path to downloaded file.
+    """
+
+    if self._results_temp_dir is None:
+      self._results_temp_dir = tempfile.mkdtemp()
+      logging.info('Downloading results to %s.', self._results_temp_dir)
+      with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
+                                              logging.WARNING):
+        appurify_sanitized.utils.wget(self._results['results']['url'],
+                                      self._results_temp_dir + '/results')
+    if results_path:
+      logging.info('Copying results to %s', results_path)
+      if not os.path.exists(os.path.dirname(results_path)):
+        os.makedirs(os.path.dirname(results_path))
+      shutil.copy(self._results_temp_dir + '/results', results_path)
+    return self._results_temp_dir + '/results'
+
+  def _GetTestStatus(self, test_run_id):
+    """Checks the state of the test, and sets self._results
+
+    Args:
+      test_run_id: Id of test on on remote service.
+    """
+
+    with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
+                                            logging.WARNING):
+      test_check_res = appurify_sanitized.api.tests_check_result(
+          self._env.token, test_run_id)
+    remote_device_helper.TestHttpResponse(test_check_res,
+                                          'Unable to get test status.')
+    self._results = test_check_res.json()['response']
+    return self._results['status']
+
+  def _AmInstrumentTestSetup(self, app_path, test_path, runner_package,
+                             environment_variables, extra_apks=None):
+    config = {'runner': runner_package}
+    if environment_variables:
+      config['environment_vars'] = ','.join(
+          '%s=%s' % (k, v) for k, v in environment_variables.iteritems())
+
+    self._app_id = self._UploadAppToDevice(app_path)
+
+    data_deps = self._test_instance.GetDataDependencies()
+    if data_deps:
+      with tempfile.NamedTemporaryFile(suffix='.zip') as test_with_deps:
+        sdcard_files = []
+        additional_apks = []
+        host_test = os.path.basename(test_path)
+        with zipfile.ZipFile(test_with_deps.name, 'w') as zip_file:
+          zip_file.write(test_path, host_test, zipfile.ZIP_DEFLATED)
+          for h, _ in data_deps:
+            if os.path.isdir(h):
+              zip_utils.WriteToZipFile(zip_file, h, '.')
+              sdcard_files.extend(os.listdir(h))
+            else:
+              zip_utils.WriteToZipFile(zip_file, h, os.path.basename(h))
+              sdcard_files.append(os.path.basename(h))
+          for a in extra_apks or ():
+            zip_utils.WriteToZipFile(zip_file, a, os.path.basename(a))
+            additional_apks.append(os.path.basename(a))
+
+        config['sdcard_files'] = ','.join(sdcard_files)
+        config['host_test'] = host_test
+        if additional_apks:
+          config['additional_apks'] = ','.join(additional_apks)
+        self._test_id = self._UploadTestToDevice(
+            'robotium', test_with_deps.name, app_id=self._app_id)
+    else:
+      self._test_id = self._UploadTestToDevice('robotium', test_path)
+
+    logging.info('Setting config: %s', config)
+    appurify_configs = {}
+    if self._env.network_config:
+      appurify_configs['network'] = self._env.network_config
+    self._SetTestConfig('robotium', config, **appurify_configs)
+
+  def _UploadAppToDevice(self, app_path):
+    """Upload app to device."""
+    logging.info('Uploading %s to remote service as %s.', app_path,
+                 self._test_instance.suite)
+    with open(app_path, 'rb') as apk_src:
+      with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
+                                              logging.WARNING):
+        upload_results = appurify_sanitized.api.apps_upload(
+            self._env.token, apk_src, 'raw', name=self._test_instance.suite)
+      remote_device_helper.TestHttpResponse(
+          upload_results, 'Unable to upload %s.' % app_path)
+      return upload_results.json()['response']['app_id']
+
+  def _UploadTestToDevice(self, test_type, test_path, app_id=None):
+    """Upload test to device
+    Args:
+      test_type: Type of test that is being uploaded. Ex. uirobot, gtest..
+    """
+    logging.info('Uploading %s to remote service.', test_path)
+    with open(test_path, 'rb') as test_src:
+      with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
+                                              logging.WARNING):
+        upload_results = appurify_sanitized.api.tests_upload(
+            self._env.token, test_src, 'raw', test_type, app_id=app_id)
+      remote_device_helper.TestHttpResponse(upload_results,
+          'Unable to upload %s.' % test_path)
+      return upload_results.json()['response']['test_id']
+
+  def _SetTestConfig(self, runner_type, runner_configs,
+                     network=appurify_constants.NETWORK.WIFI_1_BAR,
+                     pcap=0, profiler=0, videocapture=0):
+    """Generates and uploads config file for test.
+    Args:
+      runner_configs: Configs specific to the runner you are using.
+      network: Config to specify the network environment the devices running
+          the tests will be in.
+      pcap: Option to set the recording the of network traffic from the device.
+      profiler: Option to set the recording of CPU, memory, and network
+          transfer usage in the tests.
+      videocapture: Option to set video capture during the tests.
+
+    """
+    logging.info('Generating config file for test.')
+    with tempfile.TemporaryFile() as config:
+      config_data = [
+          '[appurify]',
+          'network=%s' % network,
+          'pcap=%s' % pcap,
+          'profiler=%s' % profiler,
+          'videocapture=%s' % videocapture,
+          '[%s]' % runner_type
+      ]
+      config_data.extend(
+          '%s=%s' % (k, v) for k, v in runner_configs.iteritems())
+      config.write(''.join('%s\n' % l for l in config_data))
+      config.flush()
+      config.seek(0)
+      with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
+                                              logging.WARNING):
+        config_response = appurify_sanitized.api.config_upload(
+            self._env.token, config, self._test_id)
+      remote_device_helper.TestHttpResponse(
+          config_response, 'Unable to upload test config.')
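+
+  # Illustrative contents of the generated config file (all values below
+  # are made up for illustration; network is whatever value from
+  # appurify_constants.NETWORK was passed in, and the [robotium] section
+  # mirrors runner_configs):
+  #
+  #   [appurify]
+  #   network=3
+  #   pcap=0
+  #   profiler=0
+  #   videocapture=0
+  #   [robotium]
+  #   runner=org.chromium.test.Driver
+  #   host_test=ChromeTest.apk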
+
+  def _LogLogcat(self, level=logging.CRITICAL):
+    """Prints out logcat downloaded from remote service.
+    Args:
+      level: logging level to print at.
+
+    Raises:
+      KeyError: If appurify_results/logcat.txt file cannot be found in
+                downloaded zip.
+    """
+    zip_file = self._DownloadTestResults(None)
+    with zipfile.ZipFile(zip_file) as z:
+      try:
+        logcat = z.read('appurify_results/logcat.txt')
+        printable_logcat = ''.join(c for c in logcat if c in string.printable)
+        for line in printable_logcat.splitlines():
+          logging.log(level, line)
+      except KeyError:
+        logging.error('No logcat found.')
+
+  def _LogAdbTraceLog(self):
+    zip_file = self._DownloadTestResults(None)
+    with zipfile.ZipFile(zip_file) as z:
+      adb_trace_log = z.read('adb_trace.log')
+      for line in adb_trace_log.splitlines():
+        logging.critical(line)
+
+  def _DidDeviceGoOffline(self):
+    zip_file = self._DownloadTestResults(None)
+    with zipfile.ZipFile(zip_file) as z:
+      adb_trace_log = z.read('adb_trace.log')
+      if any(_DEVICE_OFFLINE_RE.search(l) for l in adb_trace_log.splitlines()):
+        return True
+    return False
+
+  def _DetectPlatformErrors(self, results):
+    if not self._results['results']['pass']:
+      crash_msg = None
+      for line in self._results['results']['output'].splitlines():
+        m = _LONG_MSG_RE.search(line)
+        if m:
+          crash_msg = m.group(1)
+          break
+        m = _SHORT_MSG_RE.search(line)
+        if m:
+          crash_msg = m.group(1)
+      if crash_msg:
+        self._LogLogcat()
+        results.AddResult(base_test_result.BaseTestResult(
+            crash_msg, base_test_result.ResultType.CRASH))
+      elif self._DidDeviceGoOffline():
+        self._LogLogcat()
+        self._LogAdbTraceLog()
+        raise remote_device_helper.RemoteDeviceError(
+            'Remote service unable to reach device.', is_infra_error=True)
+      else:
+        # Remote service is reporting a failure, but no failure in results obj.
+        if results.DidRunPass():
+          results.AddResult(base_test_result.BaseTestResult(
+              'Remote service detected error.',
+              base_test_result.ResultType.UNKNOWN))
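+
+  # Illustrative 'am instrument' status lines that _SHORT_MSG_RE and
+  # _LONG_MSG_RE above are meant to match (example values, not verbatim
+  # device output):
+  #
+  #   INSTRUMENTATION_RESULT: shortMsg=Process crashed.
+  #   INSTRUMENTATION_RESULT: longMsg=java.lang.NullPointerException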
diff --git a/build/android/pylib/remote/device/remote_device_uirobot_test_run.py b/build/android/pylib/remote/device/remote_device_uirobot_test_run.py
new file mode 100644
index 0000000..f99e685
--- /dev/null
+++ b/build/android/pylib/remote/device/remote_device_uirobot_test_run.py
@@ -0,0 +1,85 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Run specific test on specific environment."""
+
+import logging
+
+from pylib.base import base_test_result
+from pylib.remote.device import appurify_sanitized
+from pylib.remote.device import remote_device_helper
+from pylib.remote.device import remote_device_test_run
+
+
+class RemoteDeviceUirobotTestRun(remote_device_test_run.RemoteDeviceTestRun):
+  """Run uirobot tests on a remote device."""
+
+  def __init__(self, env, test_instance):
+    """Constructor.
+
+    Args:
+      env: Environment the tests will run in.
+      test_instance: The test that will be run.
+    """
+    super(RemoteDeviceUirobotTestRun, self).__init__(env, test_instance)
+
+  #override
+  def TestPackage(self):
+    return self._test_instance.package_name
+
+  #override
+  def _TriggerSetUp(self):
+    """Set up the triggering of a test run."""
+    logging.info('Triggering test run.')
+
+    if self._env.device_type == 'Android':
+      default_runner_type = 'android_robot'
+    elif self._env.device_type == 'iOS':
+      default_runner_type = 'ios_robot'
+    else:
+      raise remote_device_helper.RemoteDeviceError(
+          'Unknown device type: %s' % self._env.device_type)
+
+    self._app_id = self._UploadAppToDevice(self._test_instance.app_under_test)
+    if not self._env.runner_type:
+      runner_type = default_runner_type
+      logging.info('Using default runner type: %s', default_runner_type)
+    else:
+      runner_type = self._env.runner_type
+
+    self._test_id = self._UploadTestToDevice(
+        runner_type, None, app_id=self._app_id)
+    config_body = {'duration': self._test_instance.minutes}
+    self._SetTestConfig(runner_type, config_body)
+
+  # TODO(rnephew): Switch to base class implementation when supported.
+  #override
+  def _UploadTestToDevice(self, test_type, test_path, app_id=None):
+    if test_path:
+      logging.info("Ignoring test path.")
+    data = {
+        'access_token': self._env.token,
+        'test_type': test_type,
+        'app_id': app_id,
+    }
+    with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
+                                            logging.WARNING):
+      test_upload_res = appurify_sanitized.utils.post('tests/upload',
+                                                      data, None)
+    remote_device_helper.TestHttpResponse(
+        test_upload_res, 'Unable to get UiRobot test id.')
+    return test_upload_res.json()['response']['test_id']
+
+  #override
+  def _ParseTestResults(self):
+    logging.info('Parsing results from remote service.')
+    results = base_test_result.TestRunResults()
+    if self._results['results']['pass']:
+      result_type = base_test_result.ResultType.PASS
+    else:
+      result_type = base_test_result.ResultType.FAIL
+    results.AddResult(base_test_result.BaseTestResult('uirobot', result_type))
+    return results
diff --git a/build/android/pylib/restart_adbd.sh b/build/android/pylib/restart_adbd.sh
new file mode 100755
index 0000000..393b2eb
--- /dev/null
+++ b/build/android/pylib/restart_adbd.sh
@@ -0,0 +1,20 @@
+#!/system/bin/sh
+
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Android shell script to restart adbd on the device. This has to be run
+# atomically as a shell script because stopping adbd prevents further commands
+# from running (even if called in the same adb shell).
+
+trap '' HUP
+trap '' TERM
+trap '' PIPE
+
+function restart() {
+  stop adbd
+  start adbd
+}
+
+restart &
diff --git a/build/android/pylib/results/__init__.py b/build/android/pylib/results/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/build/android/pylib/results/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/pylib/results/flakiness_dashboard/__init__.py b/build/android/pylib/results/flakiness_dashboard/__init__.py
new file mode 100644
index 0000000..4d6aabb
--- /dev/null
+++ b/build/android/pylib/results/flakiness_dashboard/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/pylib/results/flakiness_dashboard/json_results_generator.py b/build/android/pylib/results/flakiness_dashboard/json_results_generator.py
new file mode 100644
index 0000000..7f849f9
--- /dev/null
+++ b/build/android/pylib/results/flakiness_dashboard/json_results_generator.py
@@ -0,0 +1,696 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+#
+# Most of this file was ported over from Blink's
+# Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+# Tools/Scripts/webkitpy/common/net/file_uploader.py
+#
+
+import json
+import logging
+import mimetypes
+import os
+import time
+import urllib2
+
+_log = logging.getLogger(__name__)
+
+_JSON_PREFIX = 'ADD_RESULTS('
+_JSON_SUFFIX = ');'
+
+
+def HasJSONWrapper(string):
+  return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX)
+
+
+def StripJSONWrapper(json_content):
+  # FIXME: Kill this code once the server returns json instead of jsonp.
+  if HasJSONWrapper(json_content):
+    return json_content[len(_JSON_PREFIX):len(json_content) - len(_JSON_SUFFIX)]
+  return json_content
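+
+# Doctest-style sketch of the wrapper stripping (illustrative values):
+#
+#   >>> StripJSONWrapper('ADD_RESULTS({"version":4});')
+#   '{"version":4}'
+#   >>> StripJSONWrapper('{"version":4}')  # already plain JSON
+#   '{"version":4}'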
+
+
+def WriteJSON(json_object, file_path, callback=None):
+  # Specify separators in order to get compact encoding.
+  json_string = json.dumps(json_object, separators=(',', ':'))
+  if callback:
+    json_string = callback + '(' + json_string + ');'
+  with open(file_path, 'w') as fp:
+    fp.write(json_string)
+
+
+def ConvertTrieToFlatPaths(trie, prefix=None):
+  """Flattens the trie of paths, prepending a prefix to each."""
+  result = {}
+  for name, data in trie.iteritems():
+    if prefix:
+      name = prefix + '/' + name
+
+    if data and 'results' not in data:
+      result.update(ConvertTrieToFlatPaths(data, name))
+    else:
+      result[name] = data
+
+  return result
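+
+# Illustrative flattening (a leaf dict containing 'results' is a test
+# entry and stops the recursion):
+#
+#   >>> ConvertTrieToFlatPaths({'foo': {'bar.html': {'results': [[1, 'P']]}}})
+#   {'foo/bar.html': {'results': [[1, 'P']]}}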
+
+
+def AddPathToTrie(path, value, trie):
+  """Inserts a single path and value into a directory trie structure."""
+  if '/' not in path:
+    trie[path] = value
+    return
+
+  directory, _, rest = path.partition('/')
+  if directory not in trie:
+    trie[directory] = {}
+  AddPathToTrie(rest, value, trie[directory])
+
+
+def TestTimingsTrie(individual_test_timings):
+  """Breaks a test name into dicts by directory
+
+  foo/bar/baz.html: 1ms
+  foo/bar/baz1.html: 3ms
+
+  becomes
+  foo: {
+      bar: {
+          baz.html: 1,
+          baz1.html: 3
+      }
+  }
+  """
+  trie = {}
+  for test_result in individual_test_timings:
+    test = test_result.test_name
+
+    AddPathToTrie(test, int(1000 * test_result.test_run_time), trie)
+
+  return trie
+
+
+class TestResult(object):
+  """A simple class that represents a single test result."""
+
+  # Test modifier constants.
+  (NONE, FAILS, FLAKY, DISABLED) = range(4)
+
+  def __init__(self, test, failed=False, elapsed_time=0):
+    self.test_name = test
+    self.failed = failed
+    self.test_run_time = elapsed_time
+
+    test_name = test
+    try:
+      test_name = test.split('.')[1]
+    except IndexError:
+      _log.warn('Invalid test name: %s.', test)
+
+    if test_name.startswith('FAILS_'):
+      self.modifier = self.FAILS
+    elif test_name.startswith('FLAKY_'):
+      self.modifier = self.FLAKY
+    elif test_name.startswith('DISABLED_'):
+      self.modifier = self.DISABLED
+    else:
+      self.modifier = self.NONE
+
+  def Fixable(self):
+    return self.failed or self.modifier == self.DISABLED
+
+
+class JSONResultsGeneratorBase(object):
+  """A JSON results generator for generic tests."""
+
+  MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
+  # Min time (seconds) that will be added to the JSON.
+  MIN_TIME = 1
+
+  # Note that in non-chromium tests those chars are used to indicate
+  # test modifiers (FAILS, FLAKY, etc) but not actual test results.
+  PASS_RESULT = 'P'
+  SKIP_RESULT = 'X'
+  FAIL_RESULT = 'F'
+  FLAKY_RESULT = 'L'
+  NO_DATA_RESULT = 'N'
+
+  MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
+                      TestResult.DISABLED: SKIP_RESULT,
+                      TestResult.FAILS: FAIL_RESULT,
+                      TestResult.FLAKY: FLAKY_RESULT}
+
+  VERSION = 4
+  VERSION_KEY = 'version'
+  RESULTS = 'results'
+  TIMES = 'times'
+  BUILD_NUMBERS = 'buildNumbers'
+  TIME = 'secondsSinceEpoch'
+  TESTS = 'tests'
+
+  FIXABLE_COUNT = 'fixableCount'
+  FIXABLE = 'fixableCounts'
+  ALL_FIXABLE_COUNT = 'allFixableCount'
+
+  RESULTS_FILENAME = 'results.json'
+  TIMES_MS_FILENAME = 'times_ms.json'
+  INCREMENTAL_RESULTS_FILENAME = 'incremental_results.json'
+
+  # line too long pylint: disable=line-too-long
+  URL_FOR_TEST_LIST_JSON = (
+      'http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s&master=%s')
+  # pylint: enable=line-too-long
+
+  def __init__(self, builder_name, build_name, build_number,
+               results_file_base_path, builder_base_url,
+               test_results_map, svn_repositories=None,
+               test_results_server=None,
+               test_type='',
+               master_name=''):
+    """Modifies the results.json file. Grabs it off the archive directory
+    if it is not found locally.
+
+    Args:
+      builder_name: the builder name (e.g. Webkit).
+      build_name: the build name (e.g. webkit-rel).
+      build_number: the build number.
+      results_file_base_path: Absolute path to the directory containing the
+          results json file.
+      builder_base_url: the URL where we have the archived test results.
+          If this is None no archived results will be retrieved.
+      test_results_map: A dictionary that maps test_name to TestResult.
+      svn_repositories: A (json_field_name, svn_path) pair for SVN
+          repositories that tests rely on.  The SVN revision will be
+          included in the JSON with the given json_field_name.
+      test_results_server: server that hosts test results json.
+      test_type: test type string (e.g. 'layout-tests').
+      master_name: the name of the buildbot master.
+    """
+    self._builder_name = builder_name
+    self._build_name = build_name
+    self._build_number = build_number
+    self._builder_base_url = builder_base_url
+    self._results_directory = results_file_base_path
+
+    self._test_results_map = test_results_map
+    self._test_results = test_results_map.values()
+
+    self._svn_repositories = svn_repositories
+    if not self._svn_repositories:
+      self._svn_repositories = {}
+
+    self._test_results_server = test_results_server
+    self._test_type = test_type
+    self._master_name = master_name
+
+    self._archived_results = None
+
+  def GenerateJSONOutput(self):
+    json_object = self.GetJSON()
+    if json_object:
+      file_path = (
+          os.path.join(
+              self._results_directory,
+              self.INCREMENTAL_RESULTS_FILENAME))
+      WriteJSON(json_object, file_path)
+
+  def GenerateTimesMSFile(self):
+    times = TestTimingsTrie(self._test_results_map.values())
+    file_path = os.path.join(self._results_directory, self.TIMES_MS_FILENAME)
+    WriteJSON(times, file_path)
+
+  def GetJSON(self):
+    """Gets the results for the results.json file."""
+    results_json, error = self._GetArchivedJSONResults()
+    if error:
+      # If there was an error, don't write a results.json file at all,
+      # as that would lose all the information on the bot.
+      _log.error('Archive directory is inaccessible. Not modifying or '
+                 'clobbering the results.json file: %s', error)
+      return None
+
+    builder_name = self._builder_name
+    if results_json and builder_name not in results_json:
+      _log.debug('Builder name (%s) is not in the results.json file.',
+                 builder_name)
+
+    self._ConvertJSONToCurrentVersion(results_json)
+
+    if builder_name not in results_json:
+      results_json[builder_name] = (
+          self._CreateResultsForBuilderJSON())
+
+    results_for_builder = results_json[builder_name]
+
+    if builder_name:
+      self._InsertGenericMetaData(results_for_builder)
+
+    self._InsertFailureSummaries(results_for_builder)
+
+    # Update the all failing tests with result type and time.
+    tests = results_for_builder[self.TESTS]
+    all_failing_tests = self._GetFailedTestNames()
+    all_failing_tests.update(ConvertTrieToFlatPaths(tests))
+
+    for test in all_failing_tests:
+      self._InsertTestTimeAndResult(test, tests)
+
+    return results_json
+
+  def SetArchivedResults(self, archived_results):
+    self._archived_results = archived_results
+
+  def UploadJSONFiles(self, json_files):
+    """Uploads the given json_files to the test_results_server (if the
+    test_results_server is given)."""
+    if not self._test_results_server:
+      return
+
+    if not self._master_name:
+      _log.error(
+          '--test-results-server was set, but --master-name was not.  Not '
+          'uploading JSON files.')
+      return
+
+    _log.info('Uploading JSON files for builder: %s', self._builder_name)
+    attrs = [('builder', self._builder_name),
+             ('testtype', self._test_type),
+             ('master', self._master_name)]
+
+    files = [(json_file, os.path.join(self._results_directory, json_file))
+             for json_file in json_files]
+
+    url = 'http://%s/testfile/upload' % self._test_results_server
+    # Set uploading timeout in case appengine server is having problems.
+    # 120 seconds are more than enough to upload test results.
+    uploader = _FileUploader(url, 120)
+    try:
+      response = uploader.UploadAsMultipartFormData(files, attrs)
+      if response:
+        if response.code == 200:
+          _log.info('JSON uploaded.')
+        else:
+          _log.debug(
+              "JSON upload failed, %d: '%s'", response.code, response.read())
+      else:
+        _log.error('JSON upload failed; no response returned')
+    except Exception, err: # pylint: disable=broad-except
+      _log.error('Upload failed: %s', err)
+      return
+
+  def _GetTestTiming(self, test_name):
+    """Returns test timing data (elapsed time) in second
+    for the given test_name."""
+    if test_name in self._test_results_map:
+      # Floor for now to get time in seconds.
+      return int(self._test_results_map[test_name].test_run_time)
+    return 0
+
+  def _GetFailedTestNames(self):
+    """Returns a set of failed test names."""
+    return set([r.test_name for r in self._test_results if r.failed])
+
+  def _GetModifierChar(self, test_name):
+    """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
+    PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
+    for the given test_name.
+    """
+    if test_name not in self._test_results_map:
+      return self.__class__.NO_DATA_RESULT
+
+    test_result = self._test_results_map[test_name]
+    if test_result.modifier in self.MODIFIER_TO_CHAR:
+      return self.MODIFIER_TO_CHAR[test_result.modifier]
+
+    return self.__class__.PASS_RESULT
+
+  def _get_result_char(self, test_name):
+    """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
+    PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
+    for the given test_name.
+    """
+    if test_name not in self._test_results_map:
+      return self.__class__.NO_DATA_RESULT
+
+    test_result = self._test_results_map[test_name]
+    if test_result.modifier == TestResult.DISABLED:
+      return self.__class__.SKIP_RESULT
+
+    if test_result.failed:
+      return self.__class__.FAIL_RESULT
+
+    return self.__class__.PASS_RESULT
+
+  def _GetSVNRevision(self, in_directory):
+    """Returns the svn revision for the given directory.
+
+    Args:
+      in_directory: The directory where svn is to be run.
+    """
+    # This is overridden in flakiness_dashboard_results_uploader.py.
+    raise NotImplementedError()
+
+  def _GetArchivedJSONResults(self):
+    """Download JSON file that only contains test
+    name list from test-results server. This is for generating incremental
+    JSON so the file generated has info for tests that failed before but
+    pass or are skipped from current run.
+
+    Returns (archived_results, error) tuple where error is None if results
+    were successfully read.
+    """
+    results_json = {}
+    old_results = None
+    error = None
+
+    if not self._test_results_server:
+      return {}, None
+
+    results_file_url = (self.URL_FOR_TEST_LIST_JSON %
+                        (urllib2.quote(self._test_results_server),
+                         urllib2.quote(self._builder_name),
+                         self.RESULTS_FILENAME,
+                         urllib2.quote(self._test_type),
+                         urllib2.quote(self._master_name)))
+
+    try:
+      # FIXME: We should talk to the network via a Host object.
+      results_file = urllib2.urlopen(results_file_url)
+      old_results = results_file.read()
+    except urllib2.HTTPError, http_error:
+      # A status code outside the 4xx range means the bot is hosed for
+      # some reason and we can't grab the results.json file off of it.
+      if http_error.code < 400 or http_error.code >= 500:
+        error = http_error
+    except urllib2.URLError, url_error:
+      error = url_error
+
+    if old_results:
+      # Strip the prefix and suffix so we can get the actual JSON object.
+      old_results = StripJSONWrapper(old_results)
+
+      try:
+        results_json = json.loads(old_results)
+      except Exception: # pylint: disable=broad-except
+        _log.debug('results.json was not valid JSON. Clobbering.')
+        # The JSON file is not valid JSON. Just clobber the results.
+        results_json = {}
+    else:
+      _log.debug('Old JSON results do not exist. Starting fresh.')
+      results_json = {}
+
+    return results_json, error
+
+  def _InsertFailureSummaries(self, results_for_builder):
+    """Inserts aggregate pass/failure statistics into the JSON.
+    This method reads self._test_results and generates
+    FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries.
+
+    Args:
+      results_for_builder: Dictionary containing the test results for a
+          single builder.
+    """
+    # Insert the number of tests that failed or skipped.
+    fixable_count = len([r for r in self._test_results if r.Fixable()])
+    self._InsertItemIntoRawList(results_for_builder,
+                                fixable_count, self.FIXABLE_COUNT)
+
+    # Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
+    entry = {}
+    for test_name in self._test_results_map.iterkeys():
+      result_char = self._GetModifierChar(test_name)
+      entry[result_char] = entry.get(result_char, 0) + 1
+
+    # Insert the pass/skip/failure summary dictionary.
+    self._InsertItemIntoRawList(results_for_builder, entry,
+                                self.FIXABLE)
+
+    # Insert the number of all the tests that are supposed to pass.
+    all_test_count = len(self._test_results)
+    self._InsertItemIntoRawList(results_for_builder,
+                                all_test_count, self.ALL_FIXABLE_COUNT)
+
+  def _InsertItemIntoRawList(self, results_for_builder, item, key):
+    """Inserts the item into the list with the given key in the results for
+    this builder. Creates the list if no such list exists.
+
+    Args:
+      results_for_builder: Dictionary containing the test results for a
+          single builder.
+      item: Number or string to insert into the list.
+      key: Key in results_for_builder for the list to insert into.
+    """
+    if key in results_for_builder:
+      raw_list = results_for_builder[key]
+    else:
+      raw_list = []
+
+    raw_list.insert(0, item)
+    raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
+    results_for_builder[key] = raw_list
+
+  def _InsertItemRunLengthEncoded(self, item, encoded_results):
+    """Inserts the item into the run-length encoded results.
+
+    Args:
+      item: String or number to insert.
+      encoded_results: run-length encoded results. An array of arrays, e.g.
+          [[3,'A'],[1,'Q']] encodes AAAQ.
+    """
+    if encoded_results and item == encoded_results[0][1]:
+      num_results = encoded_results[0][0]
+      if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
+        encoded_results[0][0] = num_results + 1
+    else:
+      # Use a list instead of a class for the run-length encoding since
+      # we want the serialized form to be concise.
+      encoded_results.insert(0, [1, item])
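+
+  # A worked example (illustrative): starting from [[3,'A'],[1,'Q']], i.e.
+  # AAAQ, inserting another 'A' yields [[4,'A'],[1,'Q']], while inserting
+  # 'F' prepends a new run: [[1,'F'],[3,'A'],[1,'Q']].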
+
+  def _InsertGenericMetaData(self, results_for_builder):
+    """ Inserts generic metadata (such as version number, current time etc)
+    into the JSON.
+
+    Args:
+      results_for_builder: Dictionary containing the test results for
+          a single builder.
+    """
+    self._InsertItemIntoRawList(results_for_builder,
+                                self._build_number, self.BUILD_NUMBERS)
+
+    # Include SVN revisions for the given repositories.
+    for (name, path) in self._svn_repositories:
+      # Note: for JSON file's backward-compatibility we use 'chrome' rather
+      # than 'chromium' here.
+      lowercase_name = name.lower()
+      if lowercase_name == 'chromium':
+        lowercase_name = 'chrome'
+      self._InsertItemIntoRawList(results_for_builder,
+                                  self._GetSVNRevision(path),
+                                  lowercase_name + 'Revision')
+
+    self._InsertItemIntoRawList(results_for_builder,
+                                int(time.time()),
+                                self.TIME)
+
+  def _InsertTestTimeAndResult(self, test_name, tests):
+    """ Insert a test item with its results to the given tests dictionary.
+
+    Args:
+      tests: Dictionary containing test result entries.
+    """
+
+    result = self._get_result_char(test_name)
+    test_time = self._GetTestTiming(test_name)
+
+    this_test = tests
+    for segment in test_name.split('/'):
+      if segment not in this_test:
+        this_test[segment] = {}
+      this_test = this_test[segment]
+
+    if not this_test:
+      self._PopulateResultsAndTimesJSON(this_test)
+
+    if self.RESULTS in this_test:
+      self._InsertItemRunLengthEncoded(result, this_test[self.RESULTS])
+    else:
+      this_test[self.RESULTS] = [[1, result]]
+
+    if self.TIMES in this_test:
+      self._InsertItemRunLengthEncoded(test_time, this_test[self.TIMES])
+    else:
+      this_test[self.TIMES] = [[1, test_time]]
+
+  def _ConvertJSONToCurrentVersion(self, results_json):
+    """If the JSON does not match the current version, converts it to the
+    current version and adds in the new version number.
+    """
+    if self.VERSION_KEY in results_json:
+      archive_version = results_json[self.VERSION_KEY]
+      if archive_version == self.VERSION:
+        return
+    else:
+      archive_version = 3
+
+    # version 3->4
+    if archive_version == 3:
+      for results in results_json.values():
+        self._ConvertTestsToTrie(results)
+
+    results_json[self.VERSION_KEY] = self.VERSION
+
+  def _ConvertTestsToTrie(self, results):
+    if self.TESTS not in results:
+      return
+
+    test_results = results[self.TESTS]
+    test_results_trie = {}
+    for test in test_results.iterkeys():
+      single_test_result = test_results[test]
+      AddPathToTrie(test, single_test_result, test_results_trie)
+
+    results[self.TESTS] = test_results_trie
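+
+  # Illustrative version 3 -> 4 migration: a flat test map such as
+  #   {'foo/bar.html': {'results': [[1, 'P']]}}
+  # becomes the trie
+  #   {'foo': {'bar.html': {'results': [[1, 'P']]}}}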
+
+  def _PopulateResultsAndTimesJSON(self, results_and_times):
+    results_and_times[self.RESULTS] = []
+    results_and_times[self.TIMES] = []
+    return results_and_times
+
+  def _CreateResultsForBuilderJSON(self):
+    results_for_builder = {}
+    results_for_builder[self.TESTS] = {}
+    return results_for_builder
+
+  def _RemoveItemsOverMaxNumberOfBuilds(self, encoded_list):
+    """Removes items from the run-length encoded list after the final
+    item that exceeds the max number of builds to track.
+
+    Args:
+      encoded_list: Run-length encoded results. An array of arrays, e.g.
+          [[3,'A'],[1,'Q']] encodes AAAQ.
+    """
+    num_builds = 0
+    index = 0
+    for result in encoded_list:
+      num_builds = num_builds + result[0]
+      index = index + 1
+      if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
+        return encoded_list[:index]
+    return encoded_list
+
+  def _NormalizeResultsJSON(self, test, test_name, tests):
+    """ Prune tests where all runs pass or tests that no longer exist and
+    truncate all results to maxNumberOfBuilds.
+
+    Args:
+      test: ResultsAndTimes object for this test.
+      test_name: Name of the test.
+      tests: The JSON object with all the test results for this builder.
+    """
+    test[self.RESULTS] = self._RemoveItemsOverMaxNumberOfBuilds(
+        test[self.RESULTS])
+    test[self.TIMES] = self._RemoveItemsOverMaxNumberOfBuilds(
+        test[self.TIMES])
+
+    is_all_pass = self._IsResultsAllOfType(test[self.RESULTS],
+                                           self.PASS_RESULT)
+    is_all_no_data = self._IsResultsAllOfType(test[self.RESULTS],
+                                              self.NO_DATA_RESULT)
+    max_time = max([test_time[1] for test_time in test[self.TIMES]])
+
+    # Remove all passes/no-data from the results to reduce noise and
+    # filesize. If a test passes every run, but takes > MIN_TIME to run,
+    # don't throw away the data.
+    if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
+      del tests[test_name]
+
+  # method could be a function pylint: disable=R0201
+  def _IsResultsAllOfType(self, results, result_type):
+    """Returns whether all the results are of the given type
+    (e.g. all passes)."""
+    return len(results) == 1 and results[0][1] == result_type
+
+
+class _FileUploader(object):
+
+  def __init__(self, url, timeout_seconds):
+    self._url = url
+    self._timeout_seconds = timeout_seconds
+
+  def UploadAsMultipartFormData(self, files, attrs):
+    file_objs = []
+    for filename, path in files:
+      with open(path, 'rb') as fp:
+        file_objs.append(('file', filename, fp.read()))
+
+    # FIXME: We should use the same variable names for the formal and actual
+    # parameters.
+    content_type, data = _EncodeMultipartFormData(attrs, file_objs)
+    return self._UploadData(content_type, data)
+
+  def _UploadData(self, content_type, data):
+    start = time.time()
+    end = start + self._timeout_seconds
+    while time.time() < end:
+      try:
+        request = urllib2.Request(self._url, data,
+                                  {'Content-Type': content_type})
+        return urllib2.urlopen(request)
+      except urllib2.HTTPError as e:
+        _log.warn("Received HTTP status %s loading \"%s\".  "
+                  'Retrying in 10 seconds...', e.code, e.filename)
+        time.sleep(10)
+
+
+def _GetMIMEType(filename):
+  return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
+
+
+# FIXME: Rather than taking tuples, this function should take more
+# structured data.
+def _EncodeMultipartFormData(fields, files):
+  """Encode form fields for multipart/form-data.
+
+  Args:
+    fields: A sequence of (name, value) elements for regular form fields.
+    files: A sequence of (name, filename, value) elements for data to be
+           uploaded as files.
+  Returns:
+    (content_type, body) ready for httplib.HTTP instance.
+
+  Source:
+    http://code.google.com/p/rietveld/source/browse/trunk/upload.py
+  """
+  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
+  CRLF = '\r\n'
+  lines = []
+
+  for key, value in fields:
+    lines.append('--' + BOUNDARY)
+    lines.append('Content-Disposition: form-data; name="%s"' % key)
+    lines.append('')
+    if isinstance(value, unicode):
+      value = value.encode('utf-8')
+    lines.append(value)
+
+  for key, filename, value in files:
+    lines.append('--' + BOUNDARY)
+    lines.append('Content-Disposition: form-data; name="%s"; '
+                 'filename="%s"' % (key, filename))
+    lines.append('Content-Type: %s' % _GetMIMEType(filename))
+    lines.append('')
+    if isinstance(value, unicode):
+      value = value.encode('utf-8')
+    lines.append(value)
+
+  lines.append('--' + BOUNDARY + '--')
+  lines.append('')
+  body = CRLF.join(lines)
+  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
+  return content_type, body
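+
+# A minimal usage sketch (field and file names are made up):
+#
+#   content_type, body = _EncodeMultipartFormData(
+#       [('builder', 'DUMMY_BUILDER_NAME')],
+#       [('file', 'results.json', '{"version":4}')])
+#   # content_type is now
+#   # 'multipart/form-data; boundary=-M-A-G-I-C---B-O-U-N-D-A-R-Y-'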
diff --git a/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py b/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py
new file mode 100644
index 0000000..d6aee05
--- /dev/null
+++ b/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py
@@ -0,0 +1,213 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+#
+# Most of this file was ported over from Blink's
+# webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
+#
+
+import unittest
+import json
+
+from pylib.results.flakiness_dashboard import json_results_generator
+
+
+class JSONGeneratorTest(unittest.TestCase):
+
+  def setUp(self):
+    self.builder_name = 'DUMMY_BUILDER_NAME'
+    self.build_name = 'DUMMY_BUILD_NAME'
+    self.build_number = 'DUMMY_BUILDER_NUMBER'
+
+    # For archived results.
+    self._json = None
+    self._num_runs = 0
+    self._tests_set = set([])
+    self._test_timings = {}
+    self._failed_count_map = {}
+
+    self._PASS_count = 0
+    self._DISABLED_count = 0
+    self._FLAKY_count = 0
+    self._FAILS_count = 0
+    self._fixable_count = 0
+
+    self._orig_write_json = json_results_generator.WriteJSON
+
+    # unused arguments ... pylint: disable=W0613
+    def _WriteJSONStub(json_object, file_path, callback=None):
+      pass
+
+    json_results_generator.WriteJSON = _WriteJSONStub
+
+  def tearDown(self):
+    json_results_generator.WriteJSON = self._orig_write_json
+
+  def _TestJSONGeneration(self, passed_tests_list, failed_tests_list):
+    tests_set = set(passed_tests_list) | set(failed_tests_list)
+
+    DISABLED_tests = set([t for t in tests_set
+                          if t.startswith('DISABLED_')])
+    FLAKY_tests = set([t for t in tests_set
+                       if t.startswith('FLAKY_')])
+    FAILS_tests = set([t for t in tests_set
+                       if t.startswith('FAILS_')])
+    PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)
+
+    failed_tests = set(failed_tests_list) - DISABLED_tests
+    failed_count_map = dict([(t, 1) for t in failed_tests])
+
+    test_timings = {}
+    i = 0
+    for test in tests_set:
+      test_timings[test] = float(self._num_runs * 100 + i)
+      i += 1
+
+    test_results_map = dict()
+    for test in tests_set:
+      test_results_map[test] = json_results_generator.TestResult(
+          test, failed=(test in failed_tests),
+          elapsed_time=test_timings[test])
+
+    generator = json_results_generator.JSONResultsGeneratorBase(
+        self.builder_name, self.build_name, self.build_number,
+        '',
+        None,   # don't fetch past json results archive
+        test_results_map)
+
+    # Test incremental json results
+    incremental_json = generator.GetJSON()
+    self._VerifyJSONResults(
+        tests_set,
+        test_timings,
+        failed_count_map,
+        len(PASS_tests),
+        len(DISABLED_tests),
+        len(FLAKY_tests),
+        len(DISABLED_tests | failed_tests),
+        incremental_json,
+        1)
+
+    # We don't verify the results here, but at least we make sure the code
+    # runs without errors.
+    generator.GenerateJSONOutput()
+    generator.GenerateTimesMSFile()
+
+  def _VerifyJSONResults(self, tests_set, test_timings, failed_count_map,
+                         PASS_count, DISABLED_count, FLAKY_count,
+                         fixable_count, json_obj, num_runs):
+    # Aliasing to a short name for better access to its constants.
+    JRG = json_results_generator.JSONResultsGeneratorBase
+
+    self.assertIn(JRG.VERSION_KEY, json_obj)
+    self.assertIn(self.builder_name, json_obj)
+
+    buildinfo = json_obj[self.builder_name]
+    self.assertIn(JRG.FIXABLE, buildinfo)
+    self.assertIn(JRG.TESTS, buildinfo)
+    self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
+    self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)
+
+    if tests_set or DISABLED_count:
+      fixable = {}
+      for fixable_items in buildinfo[JRG.FIXABLE]:
+        for (result_type, count) in fixable_items.iteritems():
+          if result_type in fixable:
+            fixable[result_type] = fixable[result_type] + count
+          else:
+            fixable[result_type] = count
+
+      if PASS_count:
+        self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count)
+      else:
+        self.assertTrue(JRG.PASS_RESULT not in fixable or
+                        fixable[JRG.PASS_RESULT] == 0)
+      if DISABLED_count:
+        self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count)
+      else:
+        self.assertTrue(JRG.SKIP_RESULT not in fixable or
+                        fixable[JRG.SKIP_RESULT] == 0)
+      if FLAKY_count:
+        self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count)
+      else:
+        self.assertTrue(JRG.FLAKY_RESULT not in fixable or
+                        fixable[JRG.FLAKY_RESULT] == 0)
+
+    if failed_count_map:
+      tests = buildinfo[JRG.TESTS]
+      for test_name in failed_count_map.iterkeys():
+        test = self._FindTestInTrie(test_name, tests)
+
+        failed = 0
+        for result in test[JRG.RESULTS]:
+          if result[1] == JRG.FAIL_RESULT:
+            failed += result[0]
+        self.assertEqual(failed_count_map[test_name], failed)
+
+        timing_count = 0
+        for timings in test[JRG.TIMES]:
+          if timings[1] == test_timings[test_name]:
+            timing_count = timings[0]
+        self.assertEqual(1, timing_count)
+
+    if fixable_count:
+      self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)
+
+  def _FindTestInTrie(self, path, trie):
+    nodes = path.split('/')
+    sub_trie = trie
+    for node in nodes:
+      self.assertIn(node, sub_trie)
+      sub_trie = sub_trie[node]
+    return sub_trie
+
+  def testJSONGeneration(self):
+    self._TestJSONGeneration([], [])
+    self._TestJSONGeneration(['A1', 'B1'], [])
+    self._TestJSONGeneration([], ['FAILS_A2', 'FAILS_B2'])
+    self._TestJSONGeneration(['DISABLED_A3', 'DISABLED_B3'], [])
+    self._TestJSONGeneration(['A4'], ['B4', 'FAILS_C4'])
+    self._TestJSONGeneration(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
+    self._TestJSONGeneration(
+        ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
+        ['FAILS_D6'])
+
+    # Generate JSON with the same test sets. (Both incremental results and
+    # archived results must be updated appropriately.)
+    self._TestJSONGeneration(
+        ['A', 'FLAKY_B', 'DISABLED_C'],
+        ['FAILS_D', 'FLAKY_E'])
+    self._TestJSONGeneration(
+        ['A', 'DISABLED_C', 'FLAKY_E'],
+        ['FLAKY_B', 'FAILS_D'])
+    self._TestJSONGeneration(
+        ['FLAKY_B', 'DISABLED_C', 'FAILS_D'],
+        ['A', 'FLAKY_E'])
+
+  def testHierarchicalJSONGeneration(self):
+    # FIXME: Re-work tests to be more comprehensible and comprehensive.
+    self._TestJSONGeneration(['foo/A'], ['foo/B', 'bar/C'])
+
+  def testTestTimingsTrie(self):
+    individual_test_timings = []
+    individual_test_timings.append(
+        json_results_generator.TestResult(
+            'foo/bar/baz.html',
+            elapsed_time=1.2))
+    individual_test_timings.append(
+        json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
+    trie = json_results_generator.TestTimingsTrie(individual_test_timings)
+
+    expected_trie = {
+        'bar.html': 0,
+        'foo': {
+            'bar': {
+                'baz.html': 1200,
+            }
+        }
+    }
+
+    self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
diff --git a/build/android/pylib/results/flakiness_dashboard/results_uploader.py b/build/android/pylib/results/flakiness_dashboard/results_uploader.py
new file mode 100644
index 0000000..71fbee1
--- /dev/null
+++ b/build/android/pylib/results/flakiness_dashboard/results_uploader.py
@@ -0,0 +1,181 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Uploads the results to the flakiness dashboard server."""
+# pylint: disable=E1002,R0201
+
+import logging
+import os
+import shutil
+import tempfile
+import xml.dom.minidom
+import xml.parsers.expat
+
+
+from devil.utils import cmd_helper
+from pylib.constants import host_paths
+from pylib.results.flakiness_dashboard import json_results_generator
+from pylib.utils import repo_utils
+
+
+
+class JSONResultsGenerator(json_results_generator.JSONResultsGeneratorBase):
+  """Writes test results to a JSON file and handles uploading that file to
+  the test results server.
+  """
+  def __init__(self, builder_name, build_name, build_number, tmp_folder,
+               test_results_map, test_results_server, test_type, master_name):
+    super(JSONResultsGenerator, self).__init__(
+        builder_name=builder_name,
+        build_name=build_name,
+        build_number=build_number,
+        results_file_base_path=tmp_folder,
+        builder_base_url=None,
+        test_results_map=test_results_map,
+        svn_repositories=(('webkit', 'third_party/WebKit'),
+                          ('chrome', '.')),
+        test_results_server=test_results_server,
+        test_type=test_type,
+        master_name=master_name)
+
+  #override
+  def _GetModifierChar(self, test_name):
+    if test_name not in self._test_results_map:
+      return self.__class__.NO_DATA_RESULT
+
+    return self._test_results_map[test_name].modifier
+
+  #override
+  def _GetSVNRevision(self, in_directory):
+    """Returns the git/svn revision for the given directory.
+
+    Args:
+      in_directory: The directory relative to src.
+    """
+    def _is_git_directory(in_directory):
+      """Returns true if the given directory is in a git repository.
+
+      Args:
+        in_directory: The directory path to be tested.
+      """
+      if os.path.exists(os.path.join(in_directory, '.git')):
+        return True
+      parent = os.path.dirname(in_directory)
+      if parent == host_paths.DIR_SOURCE_ROOT or parent == in_directory:
+        return False
+      return _is_git_directory(parent)
+
+    in_directory = os.path.join(host_paths.DIR_SOURCE_ROOT, in_directory)
+
+    if not os.path.exists(os.path.join(in_directory, '.svn')):
+      if _is_git_directory(in_directory):
+        return repo_utils.GetGitHeadSHA1(in_directory)
+      else:
+        return ''
+
+    output = cmd_helper.GetCmdOutput(['svn', 'info', '--xml'], cwd=in_directory)
+    try:
+      dom = xml.dom.minidom.parseString(output)
+      return dom.getElementsByTagName('entry')[0].getAttribute('revision')
+    except xml.parsers.expat.ExpatError:
+      return ''
+
+
+class ResultsUploader(object):
+  """Handles uploading buildbot tests results to the flakiness dashboard."""
+  def __init__(self, tests_type):
+    self._build_number = os.environ.get('BUILDBOT_BUILDNUMBER')
+    self._builder_name = os.environ.get('BUILDBOT_BUILDERNAME')
+    self._tests_type = tests_type
+
+    if not self._build_number or not self._builder_name:
+      raise Exception('You should not be uploading test results to the '
+                      'server from your local machine.')
+
+    upstream = (tests_type != 'Chromium_Android_Instrumentation')
+    if upstream:
+      # TODO(frankf): Use factory properties (see buildbot/bb_device_steps.py)
+      # This requires passing the actual master name (e.g. 'ChromiumFYI' not
+      # 'chromium.fyi').
+      from slave import slave_utils # pylint: disable=F0401
+      self._build_name = slave_utils.SlaveBuildName(host_paths.DIR_SOURCE_ROOT)
+      self._master_name = slave_utils.GetActiveMaster()
+    else:
+      self._build_name = 'chromium-android'
+      buildbot_branch = os.environ.get('BUILDBOT_BRANCH')
+      if not buildbot_branch:
+        buildbot_branch = 'master'
+      else:
+        # Ensure there's no leading "origin/"
+        buildbot_branch = buildbot_branch[buildbot_branch.find('/') + 1:]
+      self._master_name = '%s-%s' % (self._build_name, buildbot_branch)
+
+    self._test_results_map = {}
+
+  def AddResults(self, test_results):
+    # TODO(frankf): Differentiate between fail/crash/timeouts.
+    conversion_map = [
+        (test_results.GetPass(), False,
+            json_results_generator.JSONResultsGeneratorBase.PASS_RESULT),
+        (test_results.GetFail(), True,
+            json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
+        (test_results.GetCrash(), True,
+            json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
+        (test_results.GetTimeout(), True,
+            json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT),
+        (test_results.GetUnknown(), True,
+            json_results_generator.JSONResultsGeneratorBase.NO_DATA_RESULT),
+        ]
+
+    for results_list, failed, modifier in conversion_map:
+      for single_test_result in results_list:
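+        # GetDuration() returns milliseconds; the generator expects seconds,
+        # hence the division below.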
+        test_result = json_results_generator.TestResult(
+            test=single_test_result.GetName(),
+            failed=failed,
+            elapsed_time=single_test_result.GetDuration() / 1000)
+        # The WebKit TestResult object sets the modifier based on the test
+        # name. Since we don't use the same test naming convention as WebKit,
+        # the modifier will be wrong, so we need to overwrite it.
+        test_result.modifier = modifier
+
+        self._test_results_map[single_test_result.GetName()] = test_result
+
+  def Upload(self, test_results_server):
+    if not self._test_results_map:
+      return
+
+    tmp_folder = tempfile.mkdtemp()
+
+    try:
+      results_generator = JSONResultsGenerator(
+          builder_name=self._builder_name,
+          build_name=self._build_name,
+          build_number=self._build_number,
+          tmp_folder=tmp_folder,
+          test_results_map=self._test_results_map,
+          test_results_server=test_results_server,
+          test_type=self._tests_type,
+          master_name=self._master_name)
+
+      json_files = ["incremental_results.json", "times_ms.json"]
+      results_generator.GenerateJSONOutput()
+      results_generator.GenerateTimesMSFile()
+      results_generator.UploadJSONFiles(json_files)
+    except Exception as e: # pylint: disable=broad-except
+      logging.error("Uploading results to test server failed: %s.", e)
+    finally:
+      shutil.rmtree(tmp_folder)
+
+
+def Upload(results, flakiness_dashboard_server, test_type):
+  """Reports test results to the flakiness dashboard for Chrome for Android.
+
+  Args:
+    results: test results.
+    flakiness_dashboard_server: the server to upload the results to.
+    test_type: the type of the tests (as displayed by the flakiness dashboard).
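+
+  Example (a minimal sketch; the server name here is hypothetical):
+    Upload(results, 'test-results.example.com', 'gtest')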
+  """
+  uploader = ResultsUploader(test_type)
+  uploader.AddResults(results)
+  uploader.Upload(flakiness_dashboard_server)
diff --git a/build/android/pylib/results/json_results.py b/build/android/pylib/results/json_results.py
new file mode 100644
index 0000000..1a60f64
--- /dev/null
+++ b/build/android/pylib/results/json_results.py
@@ -0,0 +1,156 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+
+from pylib.base import base_test_result
+
+
+def GenerateResultsDict(test_run_results):
+  """Create a results dict from |test_run_results| suitable for writing to JSON.
+  Args:
+    test_run_results: a list of base_test_result.TestRunResults objects.
+  Returns:
+    A results dict that mirrors the one generated by
+      base/test/launcher/test_results_tracker.cc:SaveSummaryAsJSON.
+  """
+  # Example json output.
+  # {
+  #   "global_tags": [],
+  #   "all_tests": [
+  #     "test1",
+  #     "test2",
+  #    ],
+  #   "disabled_tests": [],
+  #   "per_iteration_data": [
+  #     {
+  #       "test1": [
+  #         {
+  #           "status": "SUCCESS",
+  #           "elapsed_time_ms": 1,
+  #           "output_snippet": "",
+  #           "output_snippet_base64": "",
+  #           "losless_snippet": "",
+  #         },
+  #       ],
+  #       "test2": [
+  #         {
+  #           "status": "FAILURE",
+  #           "elapsed_time_ms": 12,
+  #           "output_snippet": "",
+  #           "output_snippet_base64": "",
+  #           "losless_snippet": "",
+  #         },
+  #       ],
+  #     },
+  #     {
+  #       "test1": [
+  #         {
+  #           "status": "SUCCESS",
+  #           "elapsed_time_ms": 1,
+  #           "output_snippet": "",
+  #           "output_snippet_base64": "",
+  #           "losless_snippet": "",
+  #         },
+  #       ],
+  #       "test2": [
+  #         {
+  #           "status": "FAILURE",
+  #           "elapsed_time_ms": 12,
+  #           "output_snippet": "",
+  #           "output_snippet_base64": "",
+  #           "losless_snippet": "",
+  #         },
+  #       ],
+  #     },
+  #     ...
+  #   ],
+  # }
+
+  def status_as_string(s):
+    if s == base_test_result.ResultType.PASS:
+      return 'SUCCESS'
+    elif s == base_test_result.ResultType.SKIP:
+      return 'SKIPPED'
+    elif s == base_test_result.ResultType.FAIL:
+      return 'FAILURE'
+    elif s == base_test_result.ResultType.CRASH:
+      return 'CRASH'
+    elif s == base_test_result.ResultType.TIMEOUT:
+      return 'TIMEOUT'
+    elif s == base_test_result.ResultType.UNKNOWN:
+      return 'UNKNOWN'
+
+  all_tests = set()
+  per_iteration_data = []
+  for test_run_result in test_run_results:
+    iteration_data = {
+      t.GetName(): [{
+        'status': status_as_string(t.GetType()),
+        'elapsed_time_ms': t.GetDuration(),
+        'output_snippet': '',
+        'losless_snippet': '',
+        'output_snippet_base64': '',
+      }]
+      for t in test_run_result.GetAll()
+    }
+    all_tests = all_tests.union(set(iteration_data.iterkeys()))
+    per_iteration_data.append(iteration_data)
+
+  return {
+    'global_tags': [],
+    'all_tests': sorted(list(all_tests)),
+    # TODO(jbudorick): Add support for disabled tests within base_test_result.
+    'disabled_tests': [],
+    'per_iteration_data': per_iteration_data,
+  }
+
+
+def GenerateJsonResultsFile(test_run_result, file_path):
+  """Write |test_run_result| to JSON.
+
+  This emulates the format of the JSON emitted by
+  base/test/launcher/test_results_tracker.cc:SaveSummaryAsJSON.
+
+  Args:
+    test_run_result: a list of base_test_result.TestRunResults objects.
+    file_path: The path to the JSON file to write.
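+
+  Example (a minimal sketch; |results| is a populated
+  base_test_result.TestRunResults instance):
+    GenerateJsonResultsFile([results], '/tmp/results.json')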
+  """
+  with open(file_path, 'w') as json_result_file:
+    json_result_file.write(json.dumps(GenerateResultsDict(test_run_result)))
+
+
+def ParseResultsFromJson(json_results):
+  """Creates a list of BaseTestResult objects from JSON.
+
+  Args:
+    json_results: A JSON dict in the format created by
+                  GenerateJsonResultsFile.
+
+  Returns:
+    A list of base_test_result.BaseTestResult objects.
+  """
+
+  def string_as_status(s):
+    if s == 'SUCCESS':
+      return base_test_result.ResultType.PASS
+    elif s == 'SKIPPED':
+      return base_test_result.ResultType.SKIP
+    elif s == 'FAILURE':
+      return base_test_result.ResultType.FAIL
+    elif s == 'CRASH':
+      return base_test_result.ResultType.CRASH
+    elif s == 'TIMEOUT':
+      return base_test_result.ResultType.TIMEOUT
+    else:
+      return base_test_result.ResultType.UNKNOWN
+
+  results_list = []
+  testsuite_runs = json_results['per_iteration_data']
+  for testsuite_run in testsuite_runs:
+    for test, test_runs in testsuite_run.iteritems():
+      results_list.extend(
+          [base_test_result.BaseTestResult(test,
+                                           string_as_status(tr['status']),
+                                           duration=tr['elapsed_time_ms'])
+          for tr in test_runs])
+  return results_list
+
diff --git a/build/android/pylib/results/json_results_test.py b/build/android/pylib/results/json_results_test.py
new file mode 100755
index 0000000..648f2c8
--- /dev/null
+++ b/build/android/pylib/results/json_results_test.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from pylib.base import base_test_result
+from pylib.results import json_results
+
+
+class JsonResultsTest(unittest.TestCase):
+
+  def testGenerateResultsDict_passedResult(self):
+    result = base_test_result.BaseTestResult(
+        'test.package.TestName', base_test_result.ResultType.PASS)
+
+    all_results = base_test_result.TestRunResults()
+    all_results.AddResult(result)
+
+    results_dict = json_results.GenerateResultsDict([all_results])
+    self.assertEquals(
+        ['test.package.TestName'],
+        results_dict['all_tests'])
+    self.assertEquals(1, len(results_dict['per_iteration_data']))
+
+    iteration_result = results_dict['per_iteration_data'][0]
+    self.assertTrue('test.package.TestName' in iteration_result)
+    self.assertEquals(1, len(iteration_result['test.package.TestName']))
+
+    test_iteration_result = iteration_result['test.package.TestName'][0]
+    self.assertTrue('status' in test_iteration_result)
+    self.assertEquals('SUCCESS', test_iteration_result['status'])
+
+  def testGenerateResultsDict_skippedResult(self):
+    result = base_test_result.BaseTestResult(
+        'test.package.TestName', base_test_result.ResultType.SKIP)
+
+    all_results = base_test_result.TestRunResults()
+    all_results.AddResult(result)
+
+    results_dict = json_results.GenerateResultsDict([all_results])
+    self.assertEquals(
+        ['test.package.TestName'],
+        results_dict['all_tests'])
+    self.assertEquals(1, len(results_dict['per_iteration_data']))
+
+    iteration_result = results_dict['per_iteration_data'][0]
+    self.assertTrue('test.package.TestName' in iteration_result)
+    self.assertEquals(1, len(iteration_result['test.package.TestName']))
+
+    test_iteration_result = iteration_result['test.package.TestName'][0]
+    self.assertTrue('status' in test_iteration_result)
+    self.assertEquals('SKIPPED', test_iteration_result['status'])
+
+  def testGenerateResultsDict_failedResult(self):
+    result = base_test_result.BaseTestResult(
+        'test.package.TestName', base_test_result.ResultType.FAIL)
+
+    all_results = base_test_result.TestRunResults()
+    all_results.AddResult(result)
+
+    results_dict = json_results.GenerateResultsDict([all_results])
+    self.assertEquals(
+        ['test.package.TestName'],
+        results_dict['all_tests'])
+    self.assertEquals(1, len(results_dict['per_iteration_data']))
+
+    iteration_result = results_dict['per_iteration_data'][0]
+    self.assertTrue('test.package.TestName' in iteration_result)
+    self.assertEquals(1, len(iteration_result['test.package.TestName']))
+
+    test_iteration_result = iteration_result['test.package.TestName'][0]
+    self.assertTrue('status' in test_iteration_result)
+    self.assertEquals('FAILURE', test_iteration_result['status'])
+
+  def testGenerateResultsDict_duration(self):
+    result = base_test_result.BaseTestResult(
+        'test.package.TestName', base_test_result.ResultType.PASS, duration=123)
+
+    all_results = base_test_result.TestRunResults()
+    all_results.AddResult(result)
+
+    results_dict = json_results.GenerateResultsDict([all_results])
+    self.assertEquals(
+        ['test.package.TestName'],
+        results_dict['all_tests'])
+    self.assertEquals(1, len(results_dict['per_iteration_data']))
+
+    iteration_result = results_dict['per_iteration_data'][0]
+    self.assertTrue('test.package.TestName' in iteration_result)
+    self.assertEquals(1, len(iteration_result['test.package.TestName']))
+
+    test_iteration_result = iteration_result['test.package.TestName'][0]
+    self.assertTrue('elapsed_time_ms' in test_iteration_result)
+    self.assertEquals(123, test_iteration_result['elapsed_time_ms'])
+
+  def testGenerateResultsDict_multipleResults(self):
+    result1 = base_test_result.BaseTestResult(
+        'test.package.TestName1', base_test_result.ResultType.PASS)
+    result2 = base_test_result.BaseTestResult(
+        'test.package.TestName2', base_test_result.ResultType.PASS)
+
+    all_results = base_test_result.TestRunResults()
+    all_results.AddResult(result1)
+    all_results.AddResult(result2)
+
+    results_dict = json_results.GenerateResultsDict([all_results])
+    self.assertEquals(
+        ['test.package.TestName1', 'test.package.TestName2'],
+        results_dict['all_tests'])
+
+    self.assertTrue('per_iteration_data' in results_dict)
+    iterations = results_dict['per_iteration_data']
+    self.assertEquals(1, len(iterations))
+
+    expected_tests = set([
+        'test.package.TestName1',
+        'test.package.TestName2',
+    ])
+
+    for test_name, iteration_result in iterations[0].iteritems():
+      self.assertTrue(test_name in expected_tests)
+      expected_tests.remove(test_name)
+      self.assertEquals(1, len(iteration_result))
+
+      test_iteration_result = iteration_result[0]
+      self.assertTrue('status' in test_iteration_result)
+      self.assertEquals('SUCCESS', test_iteration_result['status'])
+
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)
+
diff --git a/build/android/pylib/results/report_results.py b/build/android/pylib/results/report_results.py
new file mode 100644
index 0000000..d39acd0
--- /dev/null
+++ b/build/android/pylib/results/report_results.py
@@ -0,0 +1,116 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module containing utility functions for reporting results."""
+
+import logging
+import os
+import re
+
+from pylib import constants
+from pylib.results.flakiness_dashboard import results_uploader
+
+
+def _LogToFile(results, test_type, suite_name):
+  """Log results to local files which can be used for aggregation later."""
+  log_file_path = os.path.join(constants.GetOutDirectory(), 'test_logs')
+  if not os.path.exists(log_file_path):
+    os.mkdir(log_file_path)
+  full_file_name = os.path.join(
+      log_file_path, re.sub(r'\W', '_', test_type).lower() + '.log')
+  if not os.path.exists(full_file_name):
+    with open(full_file_name, 'w') as log_file:
+      print >> log_file, '\n%s results for %s build %s:' % (
+          test_type, os.environ.get('BUILDBOT_BUILDERNAME'),
+          os.environ.get('BUILDBOT_BUILDNUMBER'))
+
+  logging.info('Writing results to %s.', full_file_name)
+  with open(full_file_name, 'a') as log_file:
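+    # Keep at most 25 characters of the suite name, appending '...' only
+    # when something was actually truncated.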
+    shortened_suite_name = suite_name[:25] + (suite_name[25:] and '...')
+    print >> log_file, '%s%s' % (shortened_suite_name.ljust(30),
+                                 results.GetShortForm())
+
+
+def _LogToFlakinessDashboard(results, test_type, test_package,
+                             flakiness_server):
+  """Upload results to the flakiness dashboard"""
+  logging.info('Upload results for test type "%s", test package "%s" to %s',
+               test_type, test_package, flakiness_server)
+
+  try:
+    # TODO(jbudorick): remove Instrumentation once instrumentation tests
+    # switch to platform mode.
+    if test_type in ('instrumentation', 'Instrumentation'):
+      if flakiness_server == constants.UPSTREAM_FLAKINESS_SERVER:
+        assert test_package in ['ContentShellTest',
+                                'ChromePublicTest',
+                                'ChromeSyncShellTest',
+                                'AndroidWebViewTest',
+                                'SystemWebViewShellLayoutTest']
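+        # Note: rstrip('test') strips the trailing character set
+        # {'t', 'e', 's'}, which removes the lowercased 'test' suffix from
+        # the package names above.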
+        dashboard_test_type = ('%s_instrumentation_tests' %
+                               test_package.lower().rstrip('test'))
+      # Downstream server.
+      else:
+        dashboard_test_type = 'Chromium_Android_Instrumentation'
+
+    elif test_type == 'gtest':
+      dashboard_test_type = test_package
+
+    else:
+      logging.warning('Invalid test type: %s', test_type)
+      return
+
+    results_uploader.Upload(
+        results, flakiness_server, dashboard_test_type)
+
+  except Exception: # pylint: disable=broad-except
+    logging.exception('Failure while logging to %s', flakiness_server)
+
+
+def LogFull(results, test_type, test_package, annotation=None,
+            flakiness_server=None):
+  """Log the tests results for the test suite.
+
+  The results will be logged three different ways:
+    1. Log to stdout.
+    2. Log to local files for aggregating multiple test steps
+       (on buildbots only).
+    3. Log to flakiness dashboard (on buildbots only).
+
+  Args:
+    results: An instance of TestRunResults object.
+    test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.).
+    test_package: Test package name (e.g. 'ipc_tests' for gtests,
+                  'ContentShellTest' for instrumentation tests)
+    annotation: If instrumentation test type, this is a list of annotations
+                (e.g. ['Smoke', 'SmallTest']).
+    flakiness_server: If provided, upload the results to the flakiness
+                      dashboard at this URL.
+  """
+  if not results.DidRunPass():
+    logging.critical('*' * 80)
+    logging.critical('Detailed Logs')
+    logging.critical('*' * 80)
+    for line in results.GetLogs().splitlines():
+      logging.critical(line)
+  logging.critical('*' * 80)
+  logging.critical('Summary')
+  logging.critical('*' * 80)
+  for line in results.GetGtestForm().splitlines():
+    logging.critical(line)
+  logging.critical('*' * 80)
+
+  if os.environ.get('BUILDBOT_BUILDERNAME'):
+    # It is possible to have multiple buildbot steps for the same
+    # instrumentation test package using different annotations.
+    if annotation and len(annotation) == 1:
+      suite_name = annotation[0]
+    else:
+      suite_name = test_package
+    _LogToFile(results, test_type, suite_name)
+
+    if flakiness_server:
+      _LogToFlakinessDashboard(results, test_type, test_package,
+                               flakiness_server)
diff --git a/build/android/pylib/sdk/__init__.py b/build/android/pylib/sdk/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/build/android/pylib/sdk/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/pylib/sdk/aapt.py b/build/android/pylib/sdk/aapt.py
new file mode 100644
index 0000000..96fbf9c
--- /dev/null
+++ b/build/android/pylib/sdk/aapt.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.sdk.aapt import *
diff --git a/build/android/pylib/sdk/dexdump.py b/build/android/pylib/sdk/dexdump.py
new file mode 100644
index 0000000..f7357f7
--- /dev/null
+++ b/build/android/pylib/sdk/dexdump.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.sdk.dexdump import *
diff --git a/build/android/pylib/sdk/split_select.py b/build/android/pylib/sdk/split_select.py
new file mode 100644
index 0000000..6adb106
--- /dev/null
+++ b/build/android/pylib/sdk/split_select.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.sdk.split_select import *
diff --git a/build/android/pylib/symbols/__init__.py b/build/android/pylib/symbols/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/build/android/pylib/symbols/__init__.py
diff --git a/build/android/pylib/symbols/elf_symbolizer.py b/build/android/pylib/symbols/elf_symbolizer.py
new file mode 100644
index 0000000..ed3fe5c
--- /dev/null
+++ b/build/android/pylib/symbols/elf_symbolizer.py
@@ -0,0 +1,468 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import datetime
+import logging
+import multiprocessing
+import os
+import posixpath
+import Queue
+import re
+import subprocess
+import sys
+import threading
+import time
+
+
+# addr2line builds a possibly infinite memory cache that can exhaust
+# the computer's memory if allowed to grow for too long. This constant
+# controls how many lookups we do before restarting the process. 4000
+# gives near peak performance without extreme memory usage.
+ADDR2LINE_RECYCLE_LIMIT = 4000
+
+
+class ELFSymbolizer(object):
+  """An uber-fast (multiprocessing, pipelined and asynchronous) ELF symbolizer.
+
+  This class is a frontend for addr2line (part of GNU binutils), designed to
+  symbolize batches of large numbers of symbols for a given ELF file. It
+  supports sharding symbolization against many addr2line instances and
+  pipelining of multiple requests per each instance (in order to hide addr2line
+  internals and OS pipe latencies).
+
+  The interface exhibited by this class is a very simple asynchronous interface,
+  which is based on the following three methods:
+  - SymbolizeAsync(): used to request (enqueue) resolution of a given address.
+  - The |callback| method: used to communicate back the symbol information.
+  - Join(): called to conclude the batch and gather the last outstanding
+    results.
+  In essence, before the Join method returns, this class will have issued as
+  many callbacks as the number of SymbolizeAsync() calls. In this regard, note
+  that due to multiprocess sharding, callbacks can be delivered out of order.
+
+  Some background about addr2line:
+  - it is invoked passing the elf path in the cmdline, piping the addresses in
+    its stdin and getting results on its stdout.
+  - it has pretty large response times for the first requests, but it
+    works very well in streaming mode once it has been warmed up.
+  - it doesn't scale by itself (on more cores). However, spawning multiple
+    instances at the same time on the same file is pretty efficient as they
+    keep hitting the pagecache and become mostly CPU bound.
+  - it might hang or crash, mostly due to OOM. This class deals with both of
+    these problems.
+
+  Despite the "scary" imports and the multi* words above, (almost) no multi-
+  threading/processing is involved from the python viewpoint. Concurrency
+  here is achieved by spawning several addr2line subprocesses and handling their
+  output pipes asynchronously. Therefore, all the code here (with the exception
+  of the Queue instance in Addr2Line) should be free from mind-blowing
+  thread-safety concerns.
+
+  The multiprocess sharding works as follows:
+  The symbolizer tries to use as few addr2line instances as possible (with
+  respect to |max_concurrent_jobs|) and enqueues all the requests in a single
+  addr2line instance. For a small number of symbols (i.e. dozens) sharding
+  isn't worth the startup cost.
+  The multiprocess logic kicks in as soon as the queues for the existing
+  instances grow. Specifically, once all the existing instances reach the
+  |max_queue_size| bound, a new addr2line instance is kicked in.
+  In the case of a very eager producer (i.e. all |max_concurrent_jobs| instances
+  have a backlog of |max_queue_size|), back-pressure is applied on the caller by
+  blocking the SymbolizeAsync method.
+
+  This module has been deliberately designed to be dependency free (w.r.t.
+  other modules in this project), to allow easy reuse in external projects.
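+
+  Example usage (a minimal sketch; the paths and callback are hypothetical):
+
+    def _OnSymbolized(sym_info, addr):
+      print '0x%x -> %s' % (addr, sym_info)
+
+    symbolizer = ELFSymbolizer('/path/to/lib.so', '/path/to/addr2line',
+                               _OnSymbolized)
+    for addr in addresses_to_symbolize:
+      symbolizer.SymbolizeAsync(addr, callback_arg=addr)
+    symbolizer.Join()  # Returns once every callback has been invoked.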
+  """
+
+  def __init__(self, elf_file_path, addr2line_path, callback, inlines=False,
+      max_concurrent_jobs=None, addr2line_timeout=30, max_queue_size=50,
+      source_root_path=None, strip_base_path=None):
+    """Args:
+      elf_file_path: path of the elf file to be symbolized.
+      addr2line_path: path of the toolchain's addr2line binary.
+      callback: a callback which will be invoked for each resolved symbol with
+          the two args (sym_info, callback_arg). The former is an instance of
+          |ELFSymbolInfo| and contains the symbol information. The latter is an
+          embedder-provided argument which is passed to SymbolizeAsync().
+      inlines: when True, the ELFSymbolInfo will also contain the details about
+          the outer inlining functions. When False, only the innermost function
+          will be provided.
+      max_concurrent_jobs: Max number of addr2line instances spawned.
+          Parallelize responsibly, addr2line is a memory and I/O monster.
+      max_queue_size: Max number of outstanding requests per addr2line instance.
+      addr2line_timeout: Max time (in seconds) to wait for an addr2line
+          response.
+          After the timeout, the instance will be considered hung and respawned.
+      source_root_path: In some toolchains only the name of the source file
+          is output, without any path information; disambiguation searches
+          through the source directory specified by |source_root_path| argument
+          for files whose name matches, adding the full path information to the
+          output. For example, if the toolchain outputs "unicode.cc" and there
+          is a file called "unicode.cc" located under |source_root_path|/foo,
+          the tool will replace "unicode.cc" with
+          "|source_root_path|/foo/unicode.cc". If there are multiple files with
+          the same name, disambiguation will fail because the tool cannot
+          determine which of the files was the source of the symbol.
+      strip_base_path: Rebases the symbols' source paths onto
+          |source_root_path| (i.e. replaces |strip_base_path| with
+          |source_root_path|).
+    """
+    assert(os.path.isfile(addr2line_path)), 'Cannot find ' + addr2line_path
+    self.elf_file_path = elf_file_path
+    self.addr2line_path = addr2line_path
+    self.callback = callback
+    self.inlines = inlines
+    self.max_concurrent_jobs = (max_concurrent_jobs or
+                                min(multiprocessing.cpu_count(), 4))
+    self.max_queue_size = max_queue_size
+    self.addr2line_timeout = addr2line_timeout
+    self.requests_counter = 0  # For generating monotonic request IDs.
+    self._a2l_instances = []  # Up to |max_concurrent_jobs| Addr2Line inst.
+
+    # If necessary, create disambiguation lookup table
+    self.disambiguate = source_root_path is not None
+    self.disambiguation_table = {}
+    self.strip_base_path = strip_base_path
+    if self.disambiguate:
+      self.source_root_path = os.path.abspath(source_root_path)
+      self._CreateDisambiguationTable()
+
+    # Create one addr2line instance. More instances will be created on demand
+    # (up to |max_concurrent_jobs|) depending on the rate of the requests.
+    self._CreateNewA2LInstance()
+
+  def SymbolizeAsync(self, addr, callback_arg=None):
+    """Requests symbolization of a given address.
+
+    This method is not guaranteed to return immediately. It generally does, but
+    in some scenarios (e.g. all addr2line instances have full queues) it can
+    block to create back-pressure.
+
+    Args:
+      addr: address to symbolize.
+      callback_arg: optional argument which will be passed to the |callback|."""
+    assert isinstance(addr, int)
+
+    # Process all the symbols that have been resolved in the meanwhile.
+    # Essentially, this drains all the addr2line(s) out queues.
+    for a2l_to_purge in self._a2l_instances:
+      a2l_to_purge.ProcessAllResolvedSymbolsInQueue()
+      a2l_to_purge.RecycleIfNecessary()
+
+    # Find the best instance according to this logic:
+    # 1. Find an existing instance with the shortest queue.
+    # 2. If all the instances' queues are full, but there is room in the pool,
+    #    (i.e. < |max_concurrent_jobs|) create a new instance.
+    # 3. If there were already |max_concurrent_jobs| instances and all of them
+    #    had full queues, apply back-pressure.
+
+    # 1.
+    def _SortByQueueSizeAndReqID(a2l):
+      return (a2l.queue_size, a2l.first_request_id)
+    a2l = min(self._a2l_instances, key=_SortByQueueSizeAndReqID)
+
+    # 2.
+    if (a2l.queue_size >= self.max_queue_size and
+        len(self._a2l_instances) < self.max_concurrent_jobs):
+      a2l = self._CreateNewA2LInstance()
+
+    # 3.
+    if a2l.queue_size >= self.max_queue_size:
+      a2l.WaitForNextSymbolInQueue()
+
+    a2l.EnqueueRequest(addr, callback_arg)
+
+  def Join(self):
+    """Waits for all the outstanding requests to complete and terminates."""
+    for a2l in self._a2l_instances:
+      a2l.WaitForIdle()
+      a2l.Terminate()
+
+  def _CreateNewA2LInstance(self):
+    assert len(self._a2l_instances) < self.max_concurrent_jobs
+    a2l = ELFSymbolizer.Addr2Line(self)
+    self._a2l_instances.append(a2l)
+    return a2l
+
+  def _CreateDisambiguationTable(self):
+    """ Non-unique file names will result in None entries"""
+    start_time = time.time()
+    logging.info('Collecting information about available source files...')
+    self.disambiguation_table = {}
+
+    for root, _, filenames in os.walk(self.source_root_path):
+      for f in filenames:
+        self.disambiguation_table[f] = os.path.join(root, f) if (f not in
+                                       self.disambiguation_table) else None
+    logging.info('Finished collecting information about '
+                 'possible files (took %.1f s).',
+                 (time.time() - start_time))
+
+
+  class Addr2Line(object):
+    """A python wrapper around an addr2line instance.
+
+    The communication with the addr2line process looks as follows:
+      [STDIN]         [STDOUT]  (from addr2line's viewpoint)
+    > f001111
+    > f002222
+                    < Symbol::Name(foo, bar) for f001111
+                    < /path/to/source/file.c:line_number
+    > f003333
+                    < Symbol::Name2() for f002222
+                    < /path/to/source/file.c:line_number
+                    < Symbol::Name3() for f003333
+                    < /path/to/source/file.c:line_number
+    """
+
+    SYM_ADDR_RE = re.compile(r'([^:]+):(\?|\d+).*')
+
+    def __init__(self, symbolizer):
+      self._symbolizer = symbolizer
+      self._lib_file_name = posixpath.basename(symbolizer.elf_file_path)
+
+      # The request queue (i.e. addresses pushed to addr2line's stdin and not
+      # yet retrieved on stdout)
+      self._request_queue = collections.deque()
+
+      # This is essentially len(self._request_queue). It has been optimized
+      # into a separate field because it turned out to be a perf hot-spot.
+      self.queue_size = 0
+
+      # Keep track of the number of symbols a process has processed to
+      # avoid a single process growing too big and using all the memory.
+      self._processed_symbols_count = 0
+
+      # Objects required to handle the addr2line subprocess.
+      self._proc = None  # subprocess.Popen(...) instance.
+      self._thread = None  # threading.Thread instance.
+      self._out_queue = None  # Queue.Queue instance (for buffering a2l stdout).
+      self._RestartAddr2LineProcess()
+
+    def EnqueueRequest(self, addr, callback_arg):
+      """Pushes an address to addr2line's stdin (and keeps track of it)."""
+      self._symbolizer.requests_counter += 1  # For global "age" of requests.
+      req_idx = self._symbolizer.requests_counter
+      self._request_queue.append((addr, callback_arg, req_idx))
+      self.queue_size += 1
+      self._WriteToA2lStdin(addr)
+
+    def WaitForIdle(self):
+      """Waits until all the pending requests have been symbolized."""
+      while self.queue_size > 0:
+        self.WaitForNextSymbolInQueue()
+
+    def WaitForNextSymbolInQueue(self):
+      """Waits for the next pending request to be symbolized."""
+      if not self.queue_size:
+        return
+
+      # This outer loop guards against a2l hanging (detecting stdout timeout).
+      while True:
+        start_time = datetime.datetime.now()
+        timeout = datetime.timedelta(seconds=self._symbolizer.addr2line_timeout)
+
+        # The inner loop guards against a2l crashing (checking if it exited).
+        while datetime.datetime.now() - start_time < timeout:
+          # poll() returns a non-None value if the process has exited
+          # (addr2line should never exit on its own).
+          if self._proc.poll() is not None:
+            logging.warning('addr2line crashed, respawning (lib: %s).',
+                            self._lib_file_name)
+            self._RestartAddr2LineProcess()
+            # TODO(primiano): the best thing to do in this case would be
+            # shrinking the pool size as, very likely, addr2line is crashed
+            # due to low memory (and the respawned one will die again soon).
+
+          try:
+            lines = self._out_queue.get(block=True, timeout=0.25)
+          except Queue.Empty:
+            # On timeout (1/4 s.) repeat the inner loop and check whether the
+            # addr2line process crashed or we have waited too long for its
+            # output.
+            continue
+
+          # In nominal conditions, we get straight to this point.
+          self._ProcessSymbolOutput(lines)
+          return
+
+        # If this point is reached, we waited more than |addr2line_timeout|.
+        logging.warning('Hung addr2line process, respawning (lib: %s).',
+                        self._lib_file_name)
+        self._RestartAddr2LineProcess()
+
+    def ProcessAllResolvedSymbolsInQueue(self):
+      """Consumes all the addr2line output lines produced (without blocking)."""
+      if not self.queue_size:
+        return
+      while True:
+        try:
+          lines = self._out_queue.get_nowait()
+        except Queue.Empty:
+          break
+        self._ProcessSymbolOutput(lines)
+
+    def RecycleIfNecessary(self):
+      """Restarts the process if it has been used for too long.
+
+      A long running addr2line process will consume excessive amounts
+      of memory without any gain in performance."""
+      if self._processed_symbols_count >= ADDR2LINE_RECYCLE_LIMIT:
+        self._RestartAddr2LineProcess()
+
+    def Terminate(self):
+      """Kills the underlying addr2line process.
+
+      The poller |_thread| will terminate as well due to the broken pipe."""
+      try:
+        self._proc.kill()
+        self._proc.communicate()  # Essentially wait() without risking deadlock.
+      except Exception: # pylint: disable=broad-except
+        # An exception while terminating? How interesting.
+        pass
+      self._proc = None
+
+    def _WriteToA2lStdin(self, addr):
+      self._proc.stdin.write('%s\n' % hex(addr))
+      if self._symbolizer.inlines:
+        # In the case of inlines we output an extra blank line, which causes
+        # addr2line to emit a (??,??:0) tuple that we use as a boundary marker.
+        self._proc.stdin.write('\n')
+      self._proc.stdin.flush()
+
+    def _ProcessSymbolOutput(self, lines):
+      """Parses an addr2line symbol output and triggers the client callback."""
+      (_, callback_arg, _) = self._request_queue.popleft()
+      self.queue_size -= 1
+
+      innermost_sym_info = None
+      sym_info = None
+      for (line1, line2) in lines:
+        prev_sym_info = sym_info
+        name = line1 if not line1.startswith('?') else None
+        source_path = None
+        source_line = None
+        m = ELFSymbolizer.Addr2Line.SYM_ADDR_RE.match(line2)
+        if m:
+          if not m.group(1).startswith('?'):
+            source_path = m.group(1)
+            if not m.group(2).startswith('?'):
+              source_line = int(m.group(2))
+        else:
+          logging.warning('Got invalid symbol path from addr2line: %s', line2)
+
+        # In case disambiguation is on, and needed
+        was_ambiguous = False
+        disambiguated = False
+        if self._symbolizer.disambiguate:
+          if source_path and not posixpath.isabs(source_path):
+            path = self._symbolizer.disambiguation_table.get(source_path)
+            was_ambiguous = True
+            disambiguated = path is not None
+            source_path = path if disambiguated else source_path
+
+          # Use absolute paths (so that paths are consistent, as disambiguation
+          # uses absolute paths)
+          if source_path and not was_ambiguous:
+            source_path = os.path.abspath(source_path)
+
+        if source_path and self._symbolizer.strip_base_path:
+          # Strip the base path
+          source_path = re.sub('^' + self._symbolizer.strip_base_path,
+              self._symbolizer.source_root_path or '', source_path)
+
+        sym_info = ELFSymbolInfo(name, source_path, source_line, was_ambiguous,
+                                 disambiguated)
+        if prev_sym_info:
+          prev_sym_info.inlined_by = sym_info
+        if not innermost_sym_info:
+          innermost_sym_info = sym_info
+
+      self._processed_symbols_count += 1
+      self._symbolizer.callback(innermost_sym_info, callback_arg)
+
+    def _RestartAddr2LineProcess(self):
+      if self._proc:
+        self.Terminate()
+
+      # The only reason for the existence of this Queue (and the corresponding
+      # Thread below) is the lack of a subprocess.stdout.poll_avail_lines().
+      # Essentially this is a pipe able to extract a couple of lines atomically.
+      self._out_queue = Queue.Queue()
+
+      # Start the underlying addr2line process in line buffered mode.
+
+      cmd = [self._symbolizer.addr2line_path, '--functions', '--demangle',
+          '--exe=' + self._symbolizer.elf_file_path]
+      if self._symbolizer.inlines:
+        cmd += ['--inlines']
+      self._proc = subprocess.Popen(cmd, bufsize=1, stdout=subprocess.PIPE,
+          stdin=subprocess.PIPE, stderr=sys.stderr, close_fds=True)
+
+      # Start the poller thread, which simply moves atomically the lines read
+      # from the addr2line's stdout to the |_out_queue|.
+      self._thread = threading.Thread(
+          target=ELFSymbolizer.Addr2Line.StdoutReaderThread,
+          args=(self._proc.stdout, self._out_queue, self._symbolizer.inlines))
+      self._thread.daemon = True  # Don't prevent early process exit.
+      self._thread.start()
+
+      self._processed_symbols_count = 0
+
+      # Replay the pending requests on the new process (this matters only
+      # when a hung addr2line has timed out mid-run).
+      for (addr, _, _) in self._request_queue:
+        self._WriteToA2lStdin(addr)
+
+    @staticmethod
+    def StdoutReaderThread(process_pipe, queue, inlines):
+      """The poller thread fn, which moves the addr2line stdout to the |queue|.
+
+      This is the only piece of code not running on the main thread. It merely
+      writes to a Queue, which is thread-safe. In the case of inlines, it
+      detects the ??,??:0 marker and sends the lines atomically, such that the
+      main thread always receives all the lines corresponding to one symbol in
+      one shot."""
+      try:
+        lines_for_one_symbol = []
+        while True:
+          line1 = process_pipe.readline().rstrip('\r\n')
+          line2 = process_pipe.readline().rstrip('\r\n')
+          if not line1 or not line2:
+            break
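+          # With --inlines, a ('??', '??:0') pair marks the end of the lines
+          # for one symbol; any other pair means more inlined frames follow.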
+          inline_has_more_lines = inlines and (len(lines_for_one_symbol) == 0 or
+                                  (line1 != '??' and line2 != '??:0'))
+          if not inlines or inline_has_more_lines:
+            lines_for_one_symbol += [(line1, line2)]
+          if inline_has_more_lines:
+            continue
+          queue.put(lines_for_one_symbol)
+          lines_for_one_symbol = []
+        process_pipe.close()
+
+      # Every addr2line process will die at some point; let it die silently.
+      except (IOError, OSError):
+        pass
+
+    @property
+    def first_request_id(self):
+      """Returns the request_id of the oldest pending request in the queue."""
+      return self._request_queue[0][2] if self._request_queue else 0
+
+
+class ELFSymbolInfo(object):
+  """The result of the symbolization passed as first arg. of each callback."""
+
+  def __init__(self, name, source_path, source_line, was_ambiguous=False,
+               disambiguated=False):
+    """All the fields here can be None (if addr2line replies with '??')."""
+    self.name = name
+    self.source_path = source_path
+    self.source_line = source_line
+    # In the case of |inlines|=True, the |inlined_by| points to the outer
+    # function inlining the current one (and so on, to form a chain).
+    self.inlined_by = None
+    self.disambiguated = disambiguated
+    self.was_ambiguous = was_ambiguous
+
+  def __str__(self):
+    return '%s [%s:%d]' % (
+        self.name or '??', self.source_path or '??', self.source_line or 0)
diff --git a/build/android/pylib/symbols/elf_symbolizer_unittest.py b/build/android/pylib/symbols/elf_symbolizer_unittest.py
new file mode 100755
index 0000000..1d95b15
--- /dev/null
+++ b/build/android/pylib/symbols/elf_symbolizer_unittest.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import functools
+import logging
+import os
+import unittest
+
+from pylib.symbols import elf_symbolizer
+from pylib.symbols import mock_addr2line
+
+
+_MOCK_A2L_PATH = os.path.join(os.path.dirname(mock_addr2line.__file__),
+                              'mock_addr2line')
+_INCOMPLETE_MOCK_ADDR = 1024 * 1024
+_UNKNOWN_MOCK_ADDR = 2 * 1024 * 1024
+_INLINE_MOCK_ADDR = 3 * 1024 * 1024
+
+
+class ELFSymbolizerTest(unittest.TestCase):
+  def setUp(self):
+    self._callback = functools.partial(
+        ELFSymbolizerTest._SymbolizeCallback, self)
+    self._resolved_addresses = set()
+    # Mute warnings; we expect them due to the crash/hang tests.
+    logging.getLogger().setLevel(logging.ERROR)
+
+  def testParallelism1(self):
+    self._RunTest(max_concurrent_jobs=1, num_symbols=100)
+
+  def testParallelism4(self):
+    self._RunTest(max_concurrent_jobs=4, num_symbols=100)
+
+  def testParallelism8(self):
+    self._RunTest(max_concurrent_jobs=8, num_symbols=100)
+
+  def testCrash(self):
+    os.environ['MOCK_A2L_CRASH_EVERY'] = '99'
+    self._RunTest(max_concurrent_jobs=1, num_symbols=100)
+    os.environ['MOCK_A2L_CRASH_EVERY'] = '0'
+
+  def testHang(self):
+    os.environ['MOCK_A2L_HANG_EVERY'] = '99'
+    self._RunTest(max_concurrent_jobs=1, num_symbols=100)
+    os.environ['MOCK_A2L_HANG_EVERY'] = '0'
+
+  def testInlines(self):
+    """Stimulate the inline processing logic."""
+    symbolizer = elf_symbolizer.ELFSymbolizer(
+        elf_file_path='/path/doesnt/matter/mock_lib1.so',
+        addr2line_path=_MOCK_A2L_PATH,
+        callback=self._callback,
+        inlines=True,
+        max_concurrent_jobs=4)
+
+    for addr in xrange(1000):
+      exp_inline = False
+      exp_unknown = False
+
+      # First 100 addresses with inlines.
+      if addr < 100:
+        addr += _INLINE_MOCK_ADDR
+        exp_inline = True
+
+      # Followed by 100 without inlines.
+      elif addr < 200:
+        pass
+
+      # Followed by 100 interleaved inlines and not inlines.
+      elif addr < 300:
+        if addr & 1:
+          addr += _INLINE_MOCK_ADDR
+          exp_inline = True
+
+      # Followed by 100 interleaved inlines and unknowns.
+      elif addr < 400:
+        if addr & 1:
+          addr += _INLINE_MOCK_ADDR
+          exp_inline = True
+        else:
+          addr += _UNKNOWN_MOCK_ADDR
+          exp_unknown = True
+
+      exp_name = 'mock_sym_for_addr_%d' % addr if not exp_unknown else None
+      exp_source_path = 'mock_src/mock_lib1.so.c' if not exp_unknown else None
+      exp_source_line = addr if not exp_unknown else None
+      cb_arg = (addr, exp_name, exp_source_path, exp_source_line, exp_inline)
+      symbolizer.SymbolizeAsync(addr, cb_arg)
+
+    symbolizer.Join()
+
+  def testIncompleteSyminfo(self):
+    """Stimulate the symbol-not-resolved logic."""
+    symbolizer = elf_symbolizer.ELFSymbolizer(
+        elf_file_path='/path/doesnt/matter/mock_lib1.so',
+        addr2line_path=_MOCK_A2L_PATH,
+        callback=self._callback,
+        max_concurrent_jobs=1)
+
+    # Test symbols with valid name but incomplete path.
+    addr = _INCOMPLETE_MOCK_ADDR
+    exp_name = 'mock_sym_for_addr_%d' % addr
+    exp_source_path = None
+    exp_source_line = None
+    cb_arg = (addr, exp_name, exp_source_path, exp_source_line, False)
+    symbolizer.SymbolizeAsync(addr, cb_arg)
+
+    # Test symbols with no name or sym info.
+    addr = _UNKNOWN_MOCK_ADDR
+    exp_name = None
+    exp_source_path = None
+    exp_source_line = None
+    cb_arg = (addr, exp_name, exp_source_path, exp_source_line, False)
+    symbolizer.SymbolizeAsync(addr, cb_arg)
+
+    symbolizer.Join()
+
+  def _RunTest(self, max_concurrent_jobs, num_symbols):
+    symbolizer = elf_symbolizer.ELFSymbolizer(
+        elf_file_path='/path/doesnt/matter/mock_lib1.so',
+        addr2line_path=_MOCK_A2L_PATH,
+        callback=self._callback,
+        max_concurrent_jobs=max_concurrent_jobs,
+        addr2line_timeout=0.5)
+
+    for addr in xrange(num_symbols):
+      exp_name = 'mock_sym_for_addr_%d' % addr
+      exp_source_path = 'mock_src/mock_lib1.so.c'
+      exp_source_line = addr
+      cb_arg = (addr, exp_name, exp_source_path, exp_source_line, False)
+      symbolizer.SymbolizeAsync(addr, cb_arg)
+
+    symbolizer.Join()
+
+    # Check that all the expected callbacks have been received.
+    for addr in xrange(num_symbols):
+      self.assertIn(addr, self._resolved_addresses)
+      self._resolved_addresses.remove(addr)
+
+    # Check for unexpected callbacks.
+    self.assertEqual(len(self._resolved_addresses), 0)
+
+  def _SymbolizeCallback(self, sym_info, cb_arg):
+    self.assertTrue(isinstance(sym_info, elf_symbolizer.ELFSymbolInfo))
+    self.assertTrue(isinstance(cb_arg, tuple))
+    self.assertEqual(len(cb_arg), 5)
+
+    # Unpack expectations from the callback extra argument.
+    (addr, exp_name, exp_source_path, exp_source_line, exp_inlines) = cb_arg
+    if exp_name is None:
+      self.assertIsNone(sym_info.name)
+    else:
+      self.assertTrue(sym_info.name.startswith(exp_name))
+    self.assertEqual(sym_info.source_path, exp_source_path)
+    self.assertEqual(sym_info.source_line, exp_source_line)
+
+    if exp_inlines:
+      self.assertEqual(sym_info.name, exp_name + '_inner')
+      self.assertEqual(sym_info.inlined_by.name, exp_name + '_middle')
+      self.assertEqual(sym_info.inlined_by.inlined_by.name,
+                       exp_name + '_outer')
+
+    # Check against duplicate callbacks.
+    self.assertNotIn(addr, self._resolved_addresses)
+    self._resolved_addresses.add(addr)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/build/android/pylib/symbols/mock_addr2line/__init__.py b/build/android/pylib/symbols/mock_addr2line/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/build/android/pylib/symbols/mock_addr2line/__init__.py
diff --git a/build/android/pylib/symbols/mock_addr2line/mock_addr2line b/build/android/pylib/symbols/mock_addr2line/mock_addr2line
new file mode 100755
index 0000000..cd58f56
--- /dev/null
+++ b/build/android/pylib/symbols/mock_addr2line/mock_addr2line
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Simple mock for addr2line.
+
+Outputs mock symbol information, with each symbol being a function of the
+original address (so it is easy to double-check consistency in unittests).
+"""
+
+import optparse
+import os
+import posixpath
+import sys
+import time
+
+
+def main(argv):
+  parser = optparse.OptionParser()
+  parser.add_option('-e', '--exe', dest='exe')  # Path of the debug-library.so.
+  # Silently swallow the other unnecessary arguments.
+  parser.add_option('-C', '--demangle', action='store_true')
+  parser.add_option('-f', '--functions', action='store_true')
+  parser.add_option('-i', '--inlines', action='store_true')
+  options, _ = parser.parse_args(argv[1:])
+  lib_file_name = posixpath.basename(options.exe)
+  processed_sym_count = 0
+  crash_every = int(os.environ.get('MOCK_A2L_CRASH_EVERY', 0))
+  hang_every = int(os.environ.get('MOCK_A2L_HANG_EVERY', 0))
+
+  while True:
+    line = sys.stdin.readline().rstrip('\r')
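+    # Only '\r' is stripped so that a blank input line (just '\n') can be
+    # told apart from EOF (an empty string).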
+    if not line:
+      break
+
+    # An empty line should generate '??,??:0' (used as a marker for inlines).
+    if line == '\n':
+      print '??'
+      print '??:0'
+      sys.stdout.flush()
+      continue
+
+    addr = int(line, 16)
+    processed_sym_count += 1
+    if crash_every and processed_sym_count % crash_every == 0:
+      sys.exit(1)
+    if hang_every and processed_sym_count % hang_every == 0:
+      time.sleep(1)
+
+    # Addresses < 1M will return good mock symbol information.
+    if addr < 1024 * 1024:
+      print 'mock_sym_for_addr_%d' % addr
+      print 'mock_src/%s.c:%d' % (lib_file_name, addr)
+
+    # Addresses 1M <= x < 2M will return symbols with a name but a missing path.
+    elif addr < 2 * 1024 * 1024:
+      print 'mock_sym_for_addr_%d' % addr
+      print '??:0'
+
+    # Addresses 2M <= x < 3M will return unknown symbol information.
+    elif addr < 3 * 1024 * 1024:
+      print '??'
+      print '??'
+
+    # Addresses 3M <= x < 4M will return inlines.
+    elif addr < 4 * 1024 * 1024:
+      print 'mock_sym_for_addr_%d_inner' % addr
+      print 'mock_src/%s.c:%d' % (lib_file_name, addr)
+      print 'mock_sym_for_addr_%d_middle' % addr
+      print 'mock_src/%s.c:%d' % (lib_file_name, addr)
+      print 'mock_sym_for_addr_%d_outer' % addr
+      print 'mock_src/%s.c:%d' % (lib_file_name, addr)
+
+    sys.stdout.flush()
+
+
+if __name__ == '__main__':
+  main(sys.argv)
\ No newline at end of file
diff --git a/build/android/pylib/uirobot/__init__.py b/build/android/pylib/uirobot/__init__.py
new file mode 100644
index 0000000..5cac026
--- /dev/null
+++ b/build/android/pylib/uirobot/__init__.py
@@ -0,0 +1,4 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
diff --git a/build/android/pylib/uirobot/uirobot_test_instance.py b/build/android/pylib/uirobot/uirobot_test_instance.py
new file mode 100644
index 0000000..1891ab7
--- /dev/null
+++ b/build/android/pylib/uirobot/uirobot_test_instance.py
@@ -0,0 +1,77 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+
+from devil.android import apk_helper
+from pylib.base import test_instance
+
+class UirobotTestInstance(test_instance.TestInstance):
+
+  def __init__(self, args, error_func):
+    """Constructor.
+
+    Args:
+      args: Command line arguments.
+    """
+    super(UirobotTestInstance, self).__init__()
+    if not args.app_under_test:
+      error_func('Must set --app-under-test.')
+    self._app_under_test = args.app_under_test
+    self._minutes = args.minutes
+
+    if args.remote_device_file:
+      with open(args.remote_device_file) as remote_device_file:
+        device_json = json.load(remote_device_file)
+    else:
+      device_json = {}
+    device_type = device_json.get('device_type', 'Android')
+    if args.device_type:
+      if device_type and device_type != args.device_type:
+        logging.info('Overriding device_type from %s to %s',
+                     device_type, args.device_type)
+      device_type = args.device_type
+
+    if device_type == 'Android':
+      self._suite = 'Android Uirobot'
+      self._package_name = apk_helper.GetPackageName(self._app_under_test)
+    elif device_type == 'iOS':
+      self._suite = 'iOS Uirobot'
+      self._package_name = self._app_under_test
+
+  #override
+  def TestType(self):
+    """Returns type of test."""
+    return 'uirobot'
+
+  #override
+  def SetUp(self):
+    """Setup for test."""
+    pass
+
+  #override
+  def TearDown(self):
+    """Teardown for test."""
+    pass
+
+  @property
+  def app_under_test(self):
+    """Returns the app to run the test on."""
+    return self._app_under_test
+
+  @property
+  def minutes(self):
+    """Returns the number of minutes to run the uirobot for."""
+    return self._minutes
+
+  @property
+  def package_name(self):
+    """Returns the name of the package in the APK."""
+    return self._package_name
+
+  @property
+  def suite(self):
+    """Returns the name of the test suite."""
+    return self._suite
diff --git a/build/android/pylib/utils/__init__.py b/build/android/pylib/utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/build/android/pylib/utils/__init__.py
diff --git a/build/android/pylib/utils/apk_helper.py b/build/android/pylib/utils/apk_helper.py
new file mode 100644
index 0000000..dd45807
--- /dev/null
+++ b/build/android/pylib/utils/apk_helper.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.apk_helper import *
diff --git a/build/android/pylib/utils/argparse_utils.py b/build/android/pylib/utils/argparse_utils.py
new file mode 100644
index 0000000..e456d9d
--- /dev/null
+++ b/build/android/pylib/utils/argparse_utils.py
@@ -0,0 +1,50 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+
+
+class CustomHelpAction(argparse.Action):
+  '''Allows defining custom help actions.
+
+  Help actions can run even when the parser would otherwise fail on missing
+  arguments. The first help or custom help command mentioned on the command
+  line will have its help text displayed.
+
+  Usage:
+      parser = argparse.ArgumentParser(...)
+      CustomHelpAction.EnableFor(parser)
+      parser.add_argument('--foo-help',
+                          action='custom_help',
+                          custom_help_text='this is the help message',
+                          help='What this helps with')
+  '''
+  # Derived from argparse._HelpAction from
+  # https://github.com/python/cpython/blob/master/Lib/argparse.py
+
+  # pylint: disable=redefined-builtin
+  # (complains about 'help' being redefined)
+  def __init__(self,
+               option_strings,
+               dest=argparse.SUPPRESS,
+               default=argparse.SUPPRESS,
+               custom_help_text=None,
+               help=None):
+    super(CustomHelpAction, self).__init__(option_strings=option_strings,
+                                           dest=dest,
+                                           default=default,
+                                           nargs=0,
+                                           help=help)
+
+    if not custom_help_text:
+      raise ValueError('custom_help_text is required')
+    self._help_text = custom_help_text
+
+  def __call__(self, parser, namespace, values, option_string=None):
+    print self._help_text
+    parser.exit()
+
+  @staticmethod
+  def EnableFor(parser):
+    parser.register('action', 'custom_help', CustomHelpAction)
diff --git a/build/android/pylib/utils/base_error.py b/build/android/pylib/utils/base_error.py
new file mode 100644
index 0000000..263479a
--- /dev/null
+++ b/build/android/pylib/utils/base_error.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.base_error import *
diff --git a/build/android/pylib/utils/command_option_parser.py b/build/android/pylib/utils/command_option_parser.py
new file mode 100644
index 0000000..cf501d0
--- /dev/null
+++ b/build/android/pylib/utils/command_option_parser.py
@@ -0,0 +1,75 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""An option parser which handles the first arg as a command.
+
+Adds other niceties, such as printing a list of commands and an example in
+the usage message.
+"""
+
+import optparse
+import sys
+
+
+class CommandOptionParser(optparse.OptionParser):
+  """Wrapper class for OptionParser to help with listing commands."""
+
+  def __init__(self, *args, **kwargs):
+    """Creates a CommandOptionParser.
+
+    Args:
+      commands_dict: A dictionary mapping command strings to an object defining
+          - add_options_func: Adds options to the option parser
+          - run_command_func: Runs the command itself.
+      example: An example command.
+      everything else: Passed to the optparse.OptionParser constructor.
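+
+    Example (a minimal sketch; 'run' and run_cmd are hypothetical):
+      parser = CommandOptionParser(commands_dict={'run': run_cmd},
+                                   example='%prog run --verbose')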
+    """
+    self.commands_dict = kwargs.pop('commands_dict', {})
+    self.example = kwargs.pop('example', '')
+    if 'usage' not in kwargs:
+      kwargs['usage'] = 'Usage: %prog <command> [options]'
+    optparse.OptionParser.__init__(self, *args, **kwargs)
+
+  #override
+  def get_usage(self):
+    normal_usage = optparse.OptionParser.get_usage(self)
+    command_list = self.get_command_list()
+    example = self.get_example()
+    return self.expand_prog_name(normal_usage + example + command_list)
+
+  def get_command_list(self):
+    if self.commands_dict:
+      return '\nCommands:\n  %s\n' % '\n  '.join(
+          sorted(self.commands_dict.keys()))
+    return ''
+
+  def get_example(self):
+    if self.example:
+      return '\nExample:\n  %s\n' % self.example
+    return ''
+
+
+def ParseAndExecute(option_parser, argv=None):
+  """Parses options/args from argv and runs the specified command.
+
+  Args:
+    option_parser: A CommandOptionParser object.
+    argv: Command line arguments. If None, automatically draw from sys.argv.
+
+  Returns:
+    An exit code.
+  """
+  if not argv:
+    argv = sys.argv
+
+  if len(argv) < 2 or argv[1] not in option_parser.commands_dict:
+    # Parse args first; if this is '--help', optparse will print help and exit.
+    option_parser.parse_args(argv)
+    option_parser.error('Invalid command.')
+
+  cmd = option_parser.commands_dict[argv[1]]
+  cmd.add_options_func(option_parser)
+  options, args = option_parser.parse_args(argv)
+  return cmd.run_command_func(argv[1], options, args, option_parser)
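+
+
+if __name__ == '__main__':
+  # Illustrative sketch of the expected wiring; the 'greet' command and its
+  # handler below are hypothetical, not part of this module.
+  class _GreetCommand(object):
+    @staticmethod
+    def add_options_func(parser):
+      parser.add_option('--name', default='world')
+
+    @staticmethod
+    def run_command_func(command, options, _args, _parser):
+      print 'Hello, %s! (command=%s)' % (options.name, command)
+      return 0
+
+  example_parser = CommandOptionParser(
+      commands_dict={'greet': _GreetCommand},
+      example='%prog greet --name=chromium')
+  sys.exit(ParseAndExecute(example_parser, ['prog', 'greet']))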
diff --git a/build/android/pylib/utils/device_temp_file.py b/build/android/pylib/utils/device_temp_file.py
new file mode 100644
index 0000000..ae1edc8
--- /dev/null
+++ b/build/android/pylib/utils/device_temp_file.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.device_temp_file import *
diff --git a/build/android/pylib/utils/emulator.py b/build/android/pylib/utils/emulator.py
new file mode 100644
index 0000000..e2a5fea
--- /dev/null
+++ b/build/android/pylib/utils/emulator.py
@@ -0,0 +1,520 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides an interface to start and stop Android emulator.
+
+  Emulator: The class provides the methods to launch/shutdown the emulator with
+            the android virtual device named 'avd_armeabi' .
+"""
+
+import logging
+import os
+import signal
+import subprocess
+import time
+
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android.sdk import adb_wrapper
+from devil.utils import cmd_helper
+from pylib import constants
+from pylib import pexpect
+from pylib.utils import time_profile
+
+# Default sdcard size in the format of [amount][unit]
+DEFAULT_SDCARD_SIZE = '512M'
+# Default internal storage size of the emulator image, as [amount][unit]
+DEFAULT_STORAGE_SIZE = '1024M'
+
+# Each emulator gets 60 seconds of wait time to launch (6 intervals of 10 s)
+_BOOT_WAIT_INTERVALS = 6
+_BOOT_WAIT_INTERVAL_TIME = 10
+
+# Path for avd files and avd dir
+_BASE_AVD_DIR = os.path.expanduser(os.path.join('~', '.android', 'avd'))
+_TOOLS_ANDROID_PATH = os.path.join(constants.ANDROID_SDK_ROOT,
+                                   'tools', 'android')
+
+# Template used to generate config.ini files for the emulator
+CONFIG_TEMPLATE = """avd.ini.encoding=ISO-8859-1
+hw.dPad=no
+hw.lcd.density=320
+sdcard.size={sdcard.size}
+hw.cpu.arch={hw.cpu.arch}
+hw.device.hash=-708107041
+hw.camera.back=none
+disk.dataPartition.size=800M
+hw.gpu.enabled={gpu}
+skin.path=720x1280
+skin.dynamic=yes
+hw.keyboard=yes
+hw.ramSize=1024
+hw.device.manufacturer=Google
+hw.sdCard=yes
+hw.mainKeys=no
+hw.accelerometer=yes
+skin.name=720x1280
+abi.type={abi.type}
+hw.trackBall=no
+hw.device.name=Galaxy Nexus
+hw.battery=yes
+hw.sensors.proximity=yes
+image.sysdir.1=system-images/android-{api.level}/default/{abi.type}/
+hw.sensors.orientation=yes
+hw.audioInput=yes
+hw.camera.front=none
+hw.gps=yes
+vm.heapSize=128
+{extras}"""
+
+CONFIG_REPLACEMENTS = {
+  'x86': {
+    '{hw.cpu.arch}': 'x86',
+    '{abi.type}': 'x86',
+    '{extras}': ''
+  },
+  'arm': {
+    '{hw.cpu.arch}': 'arm',
+    '{abi.type}': 'armeabi-v7a',
+    '{extras}': 'hw.cpu.model=cortex-a8\n'
+  },
+  'mips': {
+    '{hw.cpu.arch}': 'mips',
+    '{abi.type}': 'mips',
+    '{extras}': ''
+  }
+}
+
+
+class EmulatorLaunchException(Exception):
+  """Emulator failed to launch."""
+  pass
+
+
+def WaitForEmulatorLaunch(num):
+  """Wait for emulators to finish booting
+
+  Emulators on bots are launch with a separate background process, to avoid
+  running tests before the emulators are fully booted, this function waits for
+  a number of emulators to finish booting
+
+  Arg:
+    num: the amount of emulators to wait.
+  """
+  for _ in range(num*_BOOT_WAIT_INTERVALS):
+    emulators = [device_utils.DeviceUtils(a)
+                 for a in adb_wrapper.AdbWrapper.Devices()
+                 if a.is_emulator]
+    if len(emulators) >= num:
+      logging.info('All %d emulators launched', num)
+      return
+    logging.info(
+        'Waiting for %d emulators, %d of them already launched', num,
+        len(emulators))
+    time.sleep(_BOOT_WAIT_INTERVAL_TIME)
+  raise Exception("Expected %d emulators, %d launched within time limit" %
+                  (num, len(emulators)))
+
+
+def KillAllEmulators():
+  """Kill all running emulators that look like ones we started.
+
+  There are odd 'sticky' cases where there can be no emulator process
+  running but a device slot is taken.  A little bot trouble and we're out of
+  room forever.
+  """
+  logging.info('Killing all existing emulators and exiting the program')
+  emulators = [device_utils.DeviceUtils(a)
+               for a in adb_wrapper.AdbWrapper.Devices()
+               if a.is_emulator]
+  if not emulators:
+    return
+  for e in emulators:
+    e.adb.Emu(['kill'])
+  logging.info('Emulator killing is async; give a few seconds for all to die.')
+  for _ in range(10):
+    if not any(a.is_emulator for a in adb_wrapper.AdbWrapper.Devices()):
+      return
+    time.sleep(1)
+
+
+def DeleteAllTempAVDs():
+  """Delete all temporary AVDs which are created for tests.
+
+  If the test exits abnormally, temporary AVDs created during testing may be
+  left in the system. Clean up these AVDs.
+  """
+  logging.info('Deleting all the avd files')
+  avds = device_utils.GetAVDs()
+  if not avds:
+    return
+  for avd_name in avds:
+    if 'run_tests_avd' in avd_name:
+      cmd = [_TOOLS_ANDROID_PATH, '-s', 'delete', 'avd', '--name', avd_name]
+      cmd_helper.RunCmd(cmd)
+      logging.info('Delete AVD %s', avd_name)
+
+
+class PortPool(object):
+  """Pool for emulator port starting position that changes over time."""
+  _port_min = 5554
+  _port_max = 5585
+  _port_current_index = 0
+
+  @classmethod
+  def port_range(cls):
+    """Return a range of valid ports for emulator use.
+
+    The port must be an even number between 5554 and 5584.  Sometimes
+    a killed emulator "hangs on" to a port long enough to prevent
+    relaunch.  This is especially true on slow machines (like a bot).
+    Cycling through a port start position helps make us resilient."""
+    ports = range(cls._port_min, cls._port_max, 2)
+    n = cls._port_current_index
+    cls._port_current_index = (n + 1) % len(ports)
+    return ports[n:] + ports[:n]
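+
+  # Illustrative behaviour of the rotation above (a sketch, not executed):
+  #   PortPool.port_range()[0]  # -> 5554 on the first call
+  #   PortPool.port_range()[0]  # -> 5556 on the second call
+  # so a port that refused to free up is not retried first on relaunch.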
+
+
+def _GetAvailablePort():
+  """Returns an available TCP port for the console."""
+  used_ports = []
+  emulators = [device_utils.DeviceUtils(a)
+               for a in adb_wrapper.AdbWrapper.Devices()
+               if a.is_emulator]
+  for emulator in emulators:
+    used_ports.append(emulator.adb.GetDeviceSerial().split('-')[1])
+  for port in PortPool.port_range():
+    if str(port) not in used_ports:
+      return port
+
+
+def LaunchTempEmulators(emulator_count, abi, api_level, enable_kvm=False,
+                        kill_and_launch=True, sdcard_size=DEFAULT_SDCARD_SIZE,
+                        storage_size=DEFAULT_STORAGE_SIZE, wait_for_boot=True,
+                        headless=False):
+  """Create and launch temporary emulators and wait for them to boot.
+
+  Args:
+    emulator_count: number of emulators to launch.
+    abi: the emulator target platform.
+    api_level: the API level (e.g., 19 for Android 4.4, KitKat).
+    enable_kvm: whether to enable KVM for x86 emulators.
+    kill_and_launch: whether to kill existing emulators before launching.
+    sdcard_size: the sdcard size, in the format of [amount][unit].
+    storage_size: the internal storage size, in the format of [amount][unit].
+    wait_for_boot: whether or not to wait for the emulators to boot up.
+    headless: whether to run the emulators with no UI.
+
+  Returns:
+    List of emulators.
+  """
+  emulators = []
+  for n in xrange(emulator_count):
+    t = time_profile.TimeProfile('Emulator launch %d' % n)
+    # Creates a temporary AVD.
+    avd_name = 'run_tests_avd_%d' % n
+    logging.info('Emulator launch %d with avd_name=%s and api=%d',
+                 n, avd_name, api_level)
+    emulator = Emulator(avd_name, abi, enable_kvm=enable_kvm,
+                        sdcard_size=sdcard_size, storage_size=storage_size,
+                        headless=headless)
+    emulator.CreateAVD(api_level)
+    emulator.Launch(kill_all_emulators=(n == 0 and kill_and_launch))
+    t.Stop()
+    emulators.append(emulator)
+  # Wait for all emulators to finish booting.
+  if wait_for_boot:
+    for emulator in emulators:
+      emulator.ConfirmLaunch(True)
+    logging.info('All emulators are fully booted')
+  return emulators
+
+
+def LaunchEmulator(avd_name, abi, kill_and_launch=True, enable_kvm=False,
+                   sdcard_size=DEFAULT_SDCARD_SIZE,
+                   storage_size=DEFAULT_STORAGE_SIZE, headless=False):
+  """Launch an existing emulator with name avd_name.
+
+  Args:
+    avd_name: name of the existing AVD to launch.
+    abi: the emulator target platform.
+    kill_and_launch: whether to kill existing emulators before launching.
+    enable_kvm: whether to enable KVM for x86 emulators.
+    sdcard_size: the sdcard size, in the format of [amount][unit].
+    storage_size: the internal storage size, in the format of [amount][unit].
+    headless: whether to run the emulator with no UI.
+
+  Returns:
+    emulator object.
+  """
+  logging.info('Launching specified emulator named avd_name=%s', avd_name)
+  emulator = Emulator(avd_name, abi, enable_kvm=enable_kvm,
+                      sdcard_size=sdcard_size, storage_size=storage_size,
+                      headless=headless)
+  emulator.Launch(kill_all_emulators=kill_and_launch)
+  emulator.ConfirmLaunch(True)
+  return emulator
+
+
+class Emulator(object):
+  """Provides the methods to launch/shutdown the emulator.
+
+  The emulator has the Android virtual device named 'avd_armeabi'.
+
+  The emulator can use any even TCP port between 5554 and 5584 for the
+  console communication, and this port becomes part of the device name, e.g.
+  'emulator-5554'. The device name is assumed to uniquely identify the
+  emulator managed by this class.
+
+  Attributes:
+    emulator: Path of Android's emulator tool.
+    popen: Popen object of the running emulator process.
+    device: Device name of this emulator.
+  """
+
+  # Signals we listen for to kill the emulator on
+  _SIGNALS = (signal.SIGINT, signal.SIGHUP)
+
+  # Time to wait for an emulator launch, in seconds.  This includes
+  # the time to launch the emulator and a wait-for-device command.
+  _LAUNCH_TIMEOUT = 120
+
+  # Timeout interval of wait-for-device command before bouncing to a
+  # process life check.
+  _WAITFORDEVICE_TIMEOUT = 5
+
+  # Time to wait for a 'wait for boot complete' (property set on device).
+  _WAITFORBOOT_TIMEOUT = 300
+
+  def __init__(self, avd_name, abi, enable_kvm=False,
+               sdcard_size=DEFAULT_SDCARD_SIZE,
+               storage_size=DEFAULT_STORAGE_SIZE, headless=False):
+    """Init an Emulator.
+
+    Args:
+      avd_name: name of the AVD to create
+      abi: target platform for emulator being created, defaults to x86
+    """
+    android_sdk_root = constants.ANDROID_SDK_ROOT
+    self.emulator = os.path.join(android_sdk_root, 'tools', 'emulator')
+    self.android = _TOOLS_ANDROID_PATH
+    self.popen = None
+    self.device_serial = None
+    self.abi = abi
+    self.avd_name = avd_name
+    self.sdcard_size = sdcard_size
+    self.storage_size = storage_size
+    self.enable_kvm = enable_kvm
+    self.headless = headless
+
+  @staticmethod
+  def _DeviceName():
+    """Return our device name."""
+    port = _GetAvailablePort()
+    return ('emulator-%d' % port, port)
+
+  def CreateAVD(self, api_level):
+    """Creates an AVD with the given name.
+
+    Args:
+      api_level: the API level of the system image.
+
+    Returns:
+      The AVD name.
+    """
+
+    if self.abi == 'arm':
+      abi_option = 'armeabi-v7a'
+    elif self.abi == 'mips':
+      abi_option = 'mips'
+    else:
+      abi_option = 'x86'
+
+    api_target = 'android-%s' % api_level
+
+    avd_command = [
+        self.android,
+        '--silent',
+        'create', 'avd',
+        '--name', self.avd_name,
+        '--abi', abi_option,
+        '--target', api_target,
+        '--sdcard', self.sdcard_size,
+        '--force',
+    ]
+    avd_cmd_str = ' '.join(avd_command)
+    logging.info('Create AVD command: %s', avd_cmd_str)
+    avd_process = pexpect.spawn(avd_cmd_str)
+
+    # Instead of creating a custom profile, we overwrite config files.
+    avd_process.expect('Do you wish to create a custom hardware profile')
+    avd_process.sendline('no\n')
+    avd_process.expect('Created AVD \'%s\'' % self.avd_name)
+
+    # Replace current configuration with default Galaxy Nexus config.
+    ini_file = os.path.join(_BASE_AVD_DIR, '%s.ini' % self.avd_name)
+    new_config_ini = os.path.join(_BASE_AVD_DIR, '%s.avd' % self.avd_name,
+                                  'config.ini')
+
+    # Remove config files with defaults, to be replaced with Galaxy Nexus
+    # settings.
+    os.unlink(ini_file)
+    os.unlink(new_config_ini)
+
+    # Create new configuration files with Galaxy Nexus by Google settings.
+    with open(ini_file, 'w') as new_ini:
+      new_ini.write('avd.ini.encoding=ISO-8859-1\n')
+      new_ini.write('target=%s\n' % api_target)
+      new_ini.write('path=%s/%s.avd\n' % (_BASE_AVD_DIR, self.avd_name))
+      new_ini.write('path.rel=avd/%s.avd\n' % self.avd_name)
+
+    custom_config = CONFIG_TEMPLATE
+    replacements = CONFIG_REPLACEMENTS[self.abi]
+    for key in replacements:
+      custom_config = custom_config.replace(key, replacements[key])
+    custom_config = custom_config.replace('{api.level}', str(api_level))
+    custom_config = custom_config.replace('{sdcard.size}', self.sdcard_size)
+    custom_config = custom_config.replace(
+        '{gpu}', 'no' if self.headless else 'yes')
+
+    with open(new_config_ini, 'w') as new_config_file:
+      new_config_file.write(custom_config)
+
+    return self.avd_name
+
+  def _DeleteAVD(self):
+    """Delete the AVD of this emulator."""
+    avd_command = [
+        self.android,
+        '--silent',
+        'delete',
+        'avd',
+        '--name', self.avd_name,
+    ]
+    logging.info('Delete AVD command: %s', ' '.join(avd_command))
+    cmd_helper.RunCmd(avd_command)
+
+  def ResizeAndWipeAvd(self, storage_size):
+    """Wipes old AVD and creates new AVD of size |storage_size|.
+
+    This serves as a workaround for '-partition-size' and '-wipe-data'.
+    """
+    userdata_img = os.path.join(_BASE_AVD_DIR, '%s.avd' % self.avd_name,
+                                'userdata.img')
+    userdata_qemu_img = os.path.join(_BASE_AVD_DIR, '%s.avd' % self.avd_name,
+                                     'userdata-qemu.img')
+    resize_cmd = ['resize2fs', userdata_img, '%s' % storage_size]
+    logging.info('Resizing userdata.img to ideal size')
+    cmd_helper.RunCmd(resize_cmd)
+    wipe_cmd = ['cp', userdata_img, userdata_qemu_img]
+    logging.info('Replacing userdata-qemu.img with the new userdata.img')
+    cmd_helper.RunCmd(wipe_cmd)
+
+  def Launch(self, kill_all_emulators):
+    """Launches the emulator asynchronously. Call ConfirmLaunch() to ensure the
+    emulator is ready for use.
+
+    If the launch fails, an exception is raised.
+    """
+    if kill_all_emulators:
+      KillAllEmulators()  # just to be sure
+    self._AggressiveImageCleanup()
+    (self.device_serial, port) = self._DeviceName()
+    self.ResizeAndWipeAvd(storage_size=self.storage_size)
+    emulator_command = [
+        self.emulator,
+        # Speed up emulator launch by 40%.  Really.
+        '-no-boot-anim',
+        ]
+    if self.headless:
+      emulator_command.extend([
+        '-no-skin',
+        '-no-audio',
+        '-no-window'
+        ])
+    else:
+      emulator_command.extend([
+          '-gpu', 'on'
+        ])
+    emulator_command.extend([
+        # Use a familiar name and port.
+        '-avd', self.avd_name,
+        '-port', str(port),
+        # All arguments after '-qemu' are sub-arguments for qemu.
+        '-qemu', '-m', '1024',
+        ])
+    if self.abi == 'x86' and self.enable_kvm:
+      emulator_command.extend([
+          # For the x86 emulator, --enable-kvm will fail early, avoiding
+          # runs in a slow mode (i.e. without hardware virtualization support).
+          '--enable-kvm',
+          ])
+
+    logging.info('Emulator launch command: %s', ' '.join(emulator_command))
+    self.popen = subprocess.Popen(args=emulator_command,
+                                  stderr=subprocess.STDOUT)
+    self._InstallKillHandler()
+
+  @staticmethod
+  def _AggressiveImageCleanup():
+    """Aggressive cleanup of emulator images.
+
+    Experimentally it looks like our current emulator use on the bot
+    leaves image files around in /tmp/android-$USER.  If a "random"
+    name gets reused, we choke with a 'File exists' error.
+    TODO(jrg): is there a less hacky way to accomplish the same goal?
+    """
+    logging.info('Aggressive Image Cleanup')
+    emulator_imagedir = '/tmp/android-%s' % os.environ['USER']
+    if not os.path.exists(emulator_imagedir):
+      return
+    for image in os.listdir(emulator_imagedir):
+      full_name = os.path.join(emulator_imagedir, image)
+      if 'emulator' in full_name:
+        logging.info('Deleting emulator image %s', full_name)
+        os.unlink(full_name)
+
+  def ConfirmLaunch(self, wait_for_boot=False):
+    """Confirm the emulator launched properly.
+
+    Loop on a wait-for-device with a very small timeout.  On each
+    timeout, check the emulator process is still alive.
+    After confirming a wait-for-device can be successful, make sure
+    it returns the right answer.
+    """
+    seconds_waited = 0
+    number_of_waits = 2  # Make sure we can wait-for-device twice.
+
+    device = device_utils.DeviceUtils(self.device_serial)
+    while seconds_waited < self._LAUNCH_TIMEOUT:
+      try:
+        device.adb.WaitForDevice(
+            timeout=self._WAITFORDEVICE_TIMEOUT, retries=1)
+        number_of_waits -= 1
+        if not number_of_waits:
+          break
+      except device_errors.CommandTimeoutError:
+        seconds_waited += self._WAITFORDEVICE_TIMEOUT
+        device.adb.KillServer()
+      self.popen.poll()
+      if self.popen.returncode is not None:
+        raise EmulatorLaunchException('EMULATOR DIED')
+
+    if seconds_waited >= self._LAUNCH_TIMEOUT:
+      raise EmulatorLaunchException('TIMEOUT with wait-for-device')
+
+    logging.info('Seconds waited on wait-for-device: %d', seconds_waited)
+    if wait_for_boot:
+      # Now that we checked for obvious problems, wait for a boot complete.
+      # Waiting for the package manager is sometimes problematic.
+      device.WaitUntilFullyBooted(timeout=self._WAITFORBOOT_TIMEOUT)
+      logging.info('%s is now fully booted', self.avd_name)
+
+  def Shutdown(self):
+    """Shuts down the process started by launch."""
+    self._DeleteAVD()
+    if self.popen:
+      self.popen.poll()
+      if self.popen.returncode is None:
+        self.popen.kill()
+      self.popen = None
+
+  def _ShutdownOnSignal(self, _signum, _frame):
+    logging.critical('emulator _ShutdownOnSignal')
+    for sig in self._SIGNALS:
+      signal.signal(sig, signal.SIG_DFL)
+    self.Shutdown()
+    raise KeyboardInterrupt  # print a stack
+
+  def _InstallKillHandler(self):
+    """Install a handler to kill the emulator when we exit unexpectedly."""
+    for sig in self._SIGNALS:
+      signal.signal(sig, self._ShutdownOnSignal)
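+
+
+if __name__ == '__main__':
+  # Illustrative usage sketch (assumes a provisioned SDK under
+  # constants.ANDROID_SDK_ROOT and an x86 system image for API 19):
+  # launch two headless temporary emulators, then shut them down.
+  logging.basicConfig(level=logging.INFO)
+  launched = LaunchTempEmulators(
+      emulator_count=2, abi='x86', api_level=19, headless=True)
+  try:
+    pass  # Run tests against the booted emulators here.
+  finally:
+    for emu in launched:
+      emu.Shutdown()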
diff --git a/build/android/pylib/utils/findbugs.py b/build/android/pylib/utils/findbugs.py
new file mode 100644
index 0000000..0456893
--- /dev/null
+++ b/build/android/pylib/utils/findbugs.py
@@ -0,0 +1,155 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import xml.dom.minidom
+
+from devil.utils import cmd_helper
+from pylib import constants
+from pylib.constants import host_paths
+
+
+_FINDBUGS_HOME = os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party',
+                              'findbugs')
+_FINDBUGS_JAR = os.path.join(_FINDBUGS_HOME, 'lib', 'findbugs.jar')
+_FINDBUGS_MAX_HEAP = 768
+_FINDBUGS_PLUGIN_PATH = os.path.join(
+    host_paths.DIR_SOURCE_ROOT, 'tools', 'android', 'findbugs_plugin', 'lib',
+    'chromiumPlugin.jar')
+
+
+def _ParseXmlResults(results_doc):
+  warnings = set()
+  for en in (n for n in results_doc.documentElement.childNodes
+             if n.nodeType == xml.dom.Node.ELEMENT_NODE):
+    if en.tagName == 'BugInstance':
+      warnings.add(_ParseBugInstance(en))
+  return warnings
+
+
+def _GetMessage(node):
+  for c in (n for n in node.childNodes
+            if n.nodeType == xml.dom.Node.ELEMENT_NODE):
+    if c.tagName == 'Message':
+      if (len(c.childNodes) == 1
+          and c.childNodes[0].nodeType == xml.dom.Node.TEXT_NODE):
+        return c.childNodes[0].data
+  return None
+
+
+def _ParseBugInstance(node):
+  bug = FindBugsWarning(node.getAttribute('type'))
+  msg_parts = []
+  for c in (n for n in node.childNodes
+            if n.nodeType == xml.dom.Node.ELEMENT_NODE):
+    if c.tagName == 'Class':
+      msg_parts.append(_GetMessage(c))
+    elif c.tagName == 'Method':
+      msg_parts.append(_GetMessage(c))
+    elif c.tagName == 'Field':
+      msg_parts.append(_GetMessage(c))
+    elif c.tagName == 'SourceLine':
+      bug.file_name = c.getAttribute('sourcefile')
+      if c.hasAttribute('start'):
+        bug.start_line = int(c.getAttribute('start'))
+      if c.hasAttribute('end'):
+        bug.end_line = int(c.getAttribute('end'))
+      msg_parts.append(_GetMessage(c))
+    elif (c.tagName == 'ShortMessage' and len(c.childNodes) == 1
+          and c.childNodes[0].nodeType == xml.dom.Node.TEXT_NODE):
+      msg_parts.append(c.childNodes[0].data)
+  bug.message = tuple(m for m in msg_parts if m)
+  return bug
+
+
+class FindBugsWarning(object):
+
+  def __init__(self, bug_type='', end_line=0, file_name='', message=None,
+               start_line=0):
+    self.bug_type = bug_type
+    self.end_line = end_line
+    self.file_name = file_name
+    if message is None:
+      self.message = tuple()
+    else:
+      self.message = message
+    self.start_line = start_line
+
+  def __cmp__(self, other):
+    return (cmp(self.file_name, other.file_name)
+            or cmp(self.start_line, other.start_line)
+            or cmp(self.end_line, other.end_line)
+            or cmp(self.bug_type, other.bug_type)
+            or cmp(self.message, other.message))
+
+  def __eq__(self, other):
+    return self.__dict__ == other.__dict__
+
+  def __hash__(self):
+    return hash((self.bug_type, self.end_line, self.file_name, self.message,
+                 self.start_line))
+
+  def __ne__(self, other):
+    return not self == other
+
+  def __str__(self):
+    return '%s: %s' % (self.bug_type, '\n  '.join(self.message))
+
+
+def Run(exclude, classes_to_analyze, auxiliary_classes, output_file,
+        findbug_args, jars):
+  """Run FindBugs.
+
+  Args:
+    exclude: the exclude xml file; see FindBugs's -exclude command line
+             option.
+    classes_to_analyze: the list of classes to analyze; see FindBugs's
+                        -onlyAnalyze command line option.
+    auxiliary_classes: the classes that help the analysis; see FindBugs's
+                       -auxclasspath command line option.
+    output_file: an optional path to dump XML results to.
+    findbug_args: a list of additional command line options to pass to
+                  FindBugs.
+    jars: the jar files to analyze.
+  """
+  # TODO(jbudorick): Get this from the build system.
+  system_classes = [
+    os.path.join(constants.ANDROID_SDK_ROOT, 'platforms',
+                 'android-%s' % constants.ANDROID_SDK_VERSION, 'android.jar')
+  ]
+  system_classes.extend(os.path.abspath(classes)
+                        for classes in auxiliary_classes or [])
+
+  cmd = ['java',
+         '-classpath', '%s:' % _FINDBUGS_JAR,
+         '-Xmx%dm' % _FINDBUGS_MAX_HEAP,
+         '-Dfindbugs.home="%s"' % _FINDBUGS_HOME,
+         '-jar', _FINDBUGS_JAR,
+         '-textui', '-sortByClass',
+         '-pluginList', _FINDBUGS_PLUGIN_PATH, '-xml:withMessages']
+  if system_classes:
+    cmd.extend(['-auxclasspath', ':'.join(system_classes)])
+  if classes_to_analyze:
+    cmd.extend(['-onlyAnalyze', classes_to_analyze])
+  if exclude:
+    cmd.extend(['-exclude', os.path.abspath(exclude)])
+  if output_file:
+    cmd.extend(['-output', output_file])
+  if findbug_args:
+    cmd.extend(findbug_args)
+  cmd.extend(os.path.abspath(j) for j in jars or [])
+
+  if output_file:
+    _, _, stderr = cmd_helper.GetCmdStatusOutputAndError(cmd)
+
+    results_doc = xml.dom.minidom.parse(output_file)
+  else:
+    _, raw_out, stderr = cmd_helper.GetCmdStatusOutputAndError(cmd)
+    results_doc = xml.dom.minidom.parseString(raw_out)
+
+  for line in stderr.splitlines():
+    logging.debug('  %s', line)
+
+  current_warnings_set = _ParseXmlResults(results_doc)
+
+  return (' '.join(cmd), current_warnings_set)
+
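+
+if __name__ == '__main__':
+  # Illustrative invocation (the jar path below is hypothetical): analyze a
+  # single jar with no exclusions and print any warnings found.
+  findbugs_cmd, found_warnings = Run(
+      exclude=None,
+      classes_to_analyze=None,
+      auxiliary_classes=None,
+      output_file=None,
+      findbug_args=None,
+      jars=['out/Debug/lib.java/chrome_java.jar'])
+  print 'FindBugs command: %s' % findbugs_cmd
+  for warning in sorted(found_warnings):
+    print str(warning)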
diff --git a/build/android/pylib/utils/host_utils.py b/build/android/pylib/utils/host_utils.py
new file mode 100644
index 0000000..ba8c9d2
--- /dev/null
+++ b/build/android/pylib/utils/host_utils.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.utils.host_utils import *
diff --git a/build/android/pylib/utils/isolator.py b/build/android/pylib/utils/isolator.py
new file mode 100644
index 0000000..f8177e0
--- /dev/null
+++ b/build/android/pylib/utils/isolator.py
@@ -0,0 +1,192 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import fnmatch
+import glob
+import os
+import shutil
+import sys
+import tempfile
+
+from devil.utils import cmd_helper
+from pylib import constants
+from pylib.constants import host_paths
+
+
+_ISOLATE_SCRIPT = os.path.join(
+    host_paths.DIR_SOURCE_ROOT, 'tools', 'swarming_client', 'isolate.py')
+
+
+def DefaultPathVariables():
+  return {
+    'DEPTH': host_paths.DIR_SOURCE_ROOT,
+    'PRODUCT_DIR': constants.GetOutDirectory(),
+  }
+
+
+def DefaultConfigVariables():
+  # Note: This list must match the --config-vars in build/isolate.gypi
+  return {
+    'CONFIGURATION_NAME': constants.GetBuildType(),
+    'OS': 'android',
+    'asan': '0',
+    'branding': 'Chromium',
+    'chromeos': '0',
+    'component': 'static_library',
+    'enable_pepper_cdms': '0',
+    'enable_plugins': '0',
+    'fastbuild': '0',
+    'icu_use_data_file_flag': '1',
+    'kasko': '0',
+    'lsan': '0',
+    'msan': '0',
+    # TODO(maruel): This may not always be true.
+    'target_arch': 'arm',
+    'tsan': '0',
+    'use_custom_libcxx': '0',
+    'use_instrumented_libraries': '0',
+    'use_prebuilt_instrumented_libraries': '0',
+    'use_ozone': '0',
+    'use_x11': '0',
+    'v8_use_external_startup_data': '1',
+    'msvs_version': '0',
+  }
+
+
+def IsIsolateEmpty(isolate_path):
+  """Returns whether there are no files in the .isolate."""
+  with open(isolate_path) as f:
+    return "'files': []" in f.read()
+
+
+class Isolator(object):
+  """Manages calls to isolate.py for the android test runner scripts."""
+
+  def __init__(self):
+    self._isolate_deps_dir = tempfile.mkdtemp()
+
+  @property
+  def isolate_deps_dir(self):
+    return self._isolate_deps_dir
+
+  def Clear(self):
+    """Deletes the isolate dependency directory."""
+    if os.path.exists(self._isolate_deps_dir):
+      shutil.rmtree(self._isolate_deps_dir)
+
+  def Remap(self, isolate_abs_path, isolated_abs_path,
+            path_variables=None, config_variables=None):
+    """Remaps data dependencies into |self._isolate_deps_dir|.
+
+    Args:
+      isolate_abs_path: The absolute path to the .isolate file, which specifies
+        data dependencies in the source tree.
+      isolated_abs_path: The absolute path to the .isolated file, which is
+        generated by isolate.py and specifies data dependencies in
+        |self._isolate_deps_dir| and their digests.
+      path_variables: A dict containing everything that should be passed
+        as a |--path-variable| to the isolate script. Defaults to the return
+        value of |DefaultPathVariables()|.
+      config_variables: A dict containing everything that should be passed
+        as a |--config-variable| to the isolate script. Defaults to the return
+        value of |DefaultConfigVariables()|.
+    Raises:
+      Exception if the isolate command fails for some reason.
+    """
+    if not path_variables:
+      path_variables = DefaultPathVariables()
+    if not config_variables:
+      config_variables = DefaultConfigVariables()
+
+    isolate_cmd = [
+      sys.executable, _ISOLATE_SCRIPT, 'remap',
+      '--isolate', isolate_abs_path,
+      '--isolated', isolated_abs_path,
+      '--outdir', self._isolate_deps_dir,
+    ]
+    for k, v in path_variables.iteritems():
+      isolate_cmd.extend(['--path-variable', k, v])
+    for k, v in config_variables.iteritems():
+      isolate_cmd.extend(['--config-variable', k, v])
+
+    exit_code, _ = cmd_helper.GetCmdStatusAndOutput(isolate_cmd)
+    if exit_code:
+      raise Exception('isolate command failed: %s' % ' '.join(isolate_cmd))
+
+  def VerifyHardlinks(self):
+    """Checks |isolate_deps_dir| for a hardlink.
+
+    Returns:
+      True if a hardlink is found.
+      False if nothing is found.
+    Raises:
+      Exception if a non-hardlink is found.
+    """
+    for root, _, filenames in os.walk(self._isolate_deps_dir):
+      if filenames:
+        linked_file = os.path.join(root, filenames[0])
+        orig_file = os.path.join(
+            self._isolate_deps_dir,
+            os.path.relpath(linked_file, self._isolate_deps_dir))
+        if os.stat(linked_file).st_ino == os.stat(orig_file).st_ino:
+          return True
+        else:
+          raise Exception('isolate remap command did not use hardlinks.')
+    return False
+
+  def PurgeExcluded(self, deps_exclusion_list):
+    """Deletes anything on |deps_exclusion_list| from |self._isolate_deps_dir|.
+
+    Args:
+      deps_exclusion_list: A list of globs to exclude from the isolate
+        dependency directory.
+    """
+    excluded_paths = (
+        x for y in deps_exclusion_list
+        for x in glob.glob(
+            os.path.abspath(os.path.join(self._isolate_deps_dir, y))))
+    for p in excluded_paths:
+      if os.path.isdir(p):
+        shutil.rmtree(p)
+      else:
+        os.remove(p)
+
+  @classmethod
+  def _DestructiveMerge(cls, src, dest):
+    if os.path.exists(dest) and os.path.isdir(dest):
+      for p in os.listdir(src):
+        cls._DestructiveMerge(os.path.join(src, p), os.path.join(dest, p))
+      os.rmdir(src)
+    else:
+      shutil.move(src, dest)
+
+  def MoveOutputDeps(self):
+    """Moves files from the output directory to the top level of
+      |self._isolate_deps_dir|.
+
+    Moves pak files from the output directory to to <isolate_deps_dir>/paks
+    Moves files from the product directory to <isolate_deps_dir>
+    """
+    # On Android, all pak files need to be in the top-level 'paks' directory.
+    paks_dir = os.path.join(self._isolate_deps_dir, 'paks')
+    os.mkdir(paks_dir)
+
+    deps_out_dir = os.path.join(
+        self._isolate_deps_dir,
+        os.path.relpath(os.path.join(constants.GetOutDirectory(), os.pardir),
+                        host_paths.DIR_SOURCE_ROOT))
+    for root, _, filenames in os.walk(deps_out_dir):
+      for filename in fnmatch.filter(filenames, '*.pak'):
+        shutil.move(os.path.join(root, filename), paks_dir)
+
+    # Move everything in PRODUCT_DIR to top level.
+    deps_product_dir = os.path.join(
+        deps_out_dir, os.path.basename(constants.GetOutDirectory()))
+    if os.path.isdir(deps_product_dir):
+      for p in os.listdir(deps_product_dir):
+        Isolator._DestructiveMerge(os.path.join(deps_product_dir, p),
+                                   os.path.join(self._isolate_deps_dir, p))
+      os.rmdir(deps_product_dir)
+      os.rmdir(deps_out_dir)
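+
+
+if __name__ == '__main__':
+  # Illustrative end-to-end sketch (the .isolate/.isolated paths below are
+  # hypothetical): remap a target's data deps, prune exclusions, and
+  # normalize the layout.
+  isolator = Isolator()
+  try:
+    isolator.Remap('/abs/path/foo_test.isolate',
+                   '/abs/path/foo_test.isolated')
+    isolator.PurgeExcluded(['*.pdb'])
+    isolator.MoveOutputDeps()
+    print 'Dependencies staged in %s' % isolator.isolate_deps_dir
+  finally:
+    isolator.Clear()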
diff --git a/build/android/pylib/utils/logging_utils.py b/build/android/pylib/utils/logging_utils.py
new file mode 100644
index 0000000..2c2eabf
--- /dev/null
+++ b/build/android/pylib/utils/logging_utils.py
@@ -0,0 +1,98 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import logging
+import os
+
+from pylib.constants import host_paths
+
+_COLORAMA_PATH = os.path.join(
+    host_paths.DIR_SOURCE_ROOT, 'third_party', 'colorama', 'src')
+
+with host_paths.SysPath(_COLORAMA_PATH):
+  import colorama
+
+
+class ColorStreamHandler(logging.StreamHandler):
+  """Handler that can be used to colorize logging output.
+
+  Example using a specific logger:
+
+    logger = logging.getLogger('my_logger')
+    logger.addHandler(ColorStreamHandler())
+    logger.info('message')
+
+  Example using the root logger:
+
+    ColorStreamHandler.MakeDefault()
+    logging.info('message')
+
+  """
+  # pylint does not see members added dynamically in the constructor.
+  # pylint: disable=no-member
+  color_map = {
+    logging.DEBUG: colorama.Fore.CYAN,
+    logging.WARNING: colorama.Fore.YELLOW,
+    logging.ERROR: colorama.Fore.RED,
+    logging.CRITICAL: colorama.Back.RED + colorama.Style.BRIGHT,
+  }
+
+  def __init__(self, force_color=False):
+    super(ColorStreamHandler, self).__init__()
+    self.force_color = force_color
+
+  @property
+  def is_tty(self):
+    isatty = getattr(self.stream, 'isatty', None)
+    return isatty and isatty()
+
+  #override
+  def format(self, record):
+    message = logging.StreamHandler.format(self, record)
+    if self.force_color or self.is_tty:
+      return self.Colorize(message, record.levelno)
+    return message
+
+  def Colorize(self, message, log_level):
+    try:
+      return self.color_map[log_level] + message + colorama.Style.RESET_ALL
+    except KeyError:
+      return message
+
+  @staticmethod
+  def MakeDefault(force_color=False):
+     """
+     Replaces the default logging handlers with a coloring handler. To use
+     a colorizing handler at the same time as others, either register them
+     after this call, or add the ColorStreamHandler on the logger using
+     Logger.addHandler()
+
+     Args:
+       force_color: Set to True to bypass the tty check and always colorize.
+     """
+     # If the existing handlers aren't removed, messages are duplicated
+     logging.getLogger().handlers = []
+     logging.getLogger().addHandler(ColorStreamHandler(force_color))
+
+
+@contextlib.contextmanager
+def SuppressLogging(level=logging.ERROR):
+  """Momentarilly suppress logging events from all loggers.
+
+  TODO(jbudorick): This is not thread safe. Log events from other threads might
+  also inadvertently dissapear.
+
+  Example:
+
+    with logging_utils.SuppressLogging():
+      # all but CRITICAL logging messages are suppressed
+      logging.info('just doing some thing') # not shown
+      logging.critical('something really bad happened') # still shown
+
+  Args:
+    level: logging events with this or lower levels are suppressed.
+  """
+  logging.disable(level)
+  try:
+    yield
+  finally:
+    logging.disable(logging.NOTSET)
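+
+
+if __name__ == '__main__':
+  # Illustrative combination of the two utilities above: force colorized
+  # output (e.g. when a bot pipes logs) and temporarily silence INFO spam.
+  ColorStreamHandler.MakeDefault(force_color=True)
+  logging.getLogger().setLevel(logging.INFO)
+  logging.info('shown, and colorized')
+  with SuppressLogging(logging.INFO):
+    logging.info('suppressed')
+  logging.info('shown again')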
diff --git a/build/android/pylib/utils/md5sum.py b/build/android/pylib/utils/md5sum.py
new file mode 100644
index 0000000..a8fedcc
--- /dev/null
+++ b/build/android/pylib/utils/md5sum.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.android.md5sum import *
diff --git a/build/android/pylib/utils/mock_calls.py b/build/android/pylib/utils/mock_calls.py
new file mode 100644
index 0000000..c65109d
--- /dev/null
+++ b/build/android/pylib/utils/mock_calls.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.utils.mock_calls import *
diff --git a/build/android/pylib/utils/parallelizer.py b/build/android/pylib/utils/parallelizer.py
new file mode 100644
index 0000000..49b18f0
--- /dev/null
+++ b/build/android/pylib/utils/parallelizer.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.utils.parallelizer import *
diff --git a/build/android/pylib/utils/proguard.py b/build/android/pylib/utils/proguard.py
new file mode 100644
index 0000000..89dc4c7
--- /dev/null
+++ b/build/android/pylib/utils/proguard.py
@@ -0,0 +1,291 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+import tempfile
+
+from devil.utils import cmd_helper
+from pylib import constants
+
+
+_PROGUARD_CLASS_RE = re.compile(r'\s*?- Program class:\s*([\S]+)$')
+_PROGUARD_SUPERCLASS_RE = re.compile(r'\s*?  Superclass:\s*([\S]+)$')
+_PROGUARD_SECTION_RE = re.compile(
+    r'^(Interfaces|Constant Pool|Fields|Methods|Class file attributes) '
+    r'\(count = \d+\):$')
+_PROGUARD_METHOD_RE = re.compile(r'\s*?- Method:\s*(\S*)[(].*$')
+_PROGUARD_ANNOTATION_RE = re.compile(r'^(\s*?)- Annotation \[L(\S*);\]:$')
+_ELEMENT_PRIMITIVE = 0
+_ELEMENT_ARRAY = 1
+_ELEMENT_ANNOTATION = 2
+_PROGUARD_ELEMENT_RES = [
+  (_ELEMENT_PRIMITIVE,
+   re.compile(r'^(\s*?)- Constant element value \[(\S*) .*\]$')),
+  (_ELEMENT_ARRAY,
+   re.compile(r'^(\s*?)- Array element value \[(\S*)\]:$')),
+  (_ELEMENT_ANNOTATION,
+   re.compile(r'^(\s*?)- Annotation element value \[(\S*)\]:$'))
+]
+_PROGUARD_INDENT_WIDTH = 2
+_PROGUARD_ANNOTATION_VALUE_RE = re.compile(r'^(\s*?)- \S+? \[(.*)\]$')
+
+_PROGUARD_PATH_SDK = os.path.join(
+    constants.PROGUARD_ROOT, 'lib', 'proguard.jar')
+_PROGUARD_PATH_BUILT = (
+    os.path.join(os.environ['ANDROID_BUILD_TOP'], 'external', 'proguard',
+                 'lib', 'proguard.jar')
+    if 'ANDROID_BUILD_TOP' in os.environ else None)
+_PROGUARD_PATH = (
+    _PROGUARD_PATH_SDK if os.path.exists(_PROGUARD_PATH_SDK)
+    else _PROGUARD_PATH_BUILT)
+
+
+def Dump(jar_path):
+  """Dumps class and method information from a JAR into a dict via proguard.
+
+  Args:
+    jar_path: An absolute path to the JAR file to dump.
+  Returns:
+    A dict in the following format:
+      {
+        'classes': [
+          {
+            'class': '',
+            'superclass': '',
+            'annotations': {/* dict -- see below */},
+            'methods': [
+              {
+                'method': '',
+                'annotations': {/* dict -- see below */},
+              },
+              ...
+            ],
+          },
+          ...
+        ],
+      }
+
+    Annotations dict format:
+      {
+        'empty-annotation-class-name': None,
+        'annotation-class-name': {
+          'field': 'primitive-value',
+          'field': [ 'array-item-1', 'array-item-2', ... ],
+          'field': {
+            /* Object value */
+            'field': 'primitive-value',
+            'field': [ 'array-item-1', 'array-item-2', ... ],
+            'field': { /* Object value */ }
+          }
+        }
+      }
+
+    Note that for top-level annotations their class names are used for
+    identification, whereas for any nested annotations the corresponding
+    field names are used.
+
+    One drawback of this approach is that an array containing empty
+    annotation classes will be represented as an array of 'None' values,
+    thus it will not be possible to find out annotation class names.
+    On the other hand, storing both annotation class name and the field name
+    would produce a very complex JSON.
+  """
+
+  with tempfile.NamedTemporaryFile() as proguard_output:
+    cmd_helper.GetCmdStatusAndOutput([
+        'java',
+        '-jar', _PROGUARD_PATH,
+        '-injars', jar_path,
+        '-dontshrink', '-dontoptimize', '-dontobfuscate', '-dontpreverify',
+        '-dump', proguard_output.name])
+    return Parse(proguard_output)
+
+
+class _AnnotationElement(object):
+  def __init__(self, name, ftype, depth):
+    self.ref = None
+    self.name = name
+    self.ftype = ftype
+    self.depth = depth
+
+
+class _ParseState(object):
+  _INITIAL_VALUES = (lambda: None, list, dict)
+  # Empty annotations are represented as 'None', not as an empty dictionary.
+  _LAZY_INITIAL_VALUES = (lambda: None, list, lambda: None)
+
+  def __init__(self):
+    self._class_result = None
+    self._method_result = None
+    self._parse_annotations = False
+    self._annotation_stack = []
+
+  def ResetPerSection(self, section_name):
+    self.InitMethod(None)
+    self._parse_annotations = (
+      section_name in ['Class file attributes', 'Methods'])
+
+  def ParseAnnotations(self):
+    return self._parse_annotations
+
+  def CreateAndInitClass(self, class_name):
+    self.InitMethod(None)
+    self._class_result = {
+      'class': class_name,
+      'superclass': '',
+      'annotations': {},
+      'methods': [],
+    }
+    return self._class_result
+
+  def HasCurrentClass(self):
+    return bool(self._class_result)
+
+  def SetSuperClass(self, superclass):
+    assert self.HasCurrentClass()
+    self._class_result['superclass'] = superclass
+
+  def InitMethod(self, method_name):
+    self._annotation_stack = []
+    if method_name:
+      self._method_result = {
+        'method': method_name,
+        'annotations': {},
+      }
+      self._class_result['methods'].append(self._method_result)
+    else:
+      self._method_result = None
+
+  def InitAnnotation(self, annotation, depth):
+    if not self._annotation_stack:
+      # Add a fake parent element comprising 'annotations' dictionary,
+      # so we can work uniformly with both top-level and nested annotations.
+      annotations = _AnnotationElement(
+        '<<<top level>>>', _ELEMENT_ANNOTATION, depth - 1)
+      if self._method_result:
+        annotations.ref = self._method_result['annotations']
+      else:
+        annotations.ref = self._class_result['annotations']
+      self._annotation_stack = [annotations]
+    self._BacktrackAnnotationStack(depth)
+    if not self.HasCurrentAnnotation():
+      self._annotation_stack.append(
+        _AnnotationElement(annotation, _ELEMENT_ANNOTATION, depth))
+    self._CreateAnnotationPlaceHolder(self._LAZY_INITIAL_VALUES)
+
+  def HasCurrentAnnotation(self):
+    return len(self._annotation_stack) > 1
+
+  def InitAnnotationField(self, field, field_type, depth):
+    self._BacktrackAnnotationStack(depth)
+    # Create the parent representation, if needed. E.g. annotations
+    # are represented with `None`, not with `{}` until they receive the first
+    # field.
+    self._CreateAnnotationPlaceHolder(self._INITIAL_VALUES)
+    if self._annotation_stack[-1].ftype == _ELEMENT_ARRAY:
+      # Nested arrays are not allowed in annotations.
+      assert field_type != _ELEMENT_ARRAY
+      # Use array index instead of bogus field name.
+      field = len(self._annotation_stack[-1].ref)
+    self._annotation_stack.append(_AnnotationElement(field, field_type, depth))
+    self._CreateAnnotationPlaceHolder(self._LAZY_INITIAL_VALUES)
+
+  def UpdateCurrentAnnotationFieldValue(self, value, depth):
+    self._BacktrackAnnotationStack(depth)
+    self._InitOrUpdateCurrentField(value)
+
+  def _CreateAnnotationPlaceHolder(self, constructors):
+    assert self.HasCurrentAnnotation()
+    field = self._annotation_stack[-1]
+    if field.ref is None:
+      field.ref = constructors[field.ftype]()
+      self._InitOrUpdateCurrentField(field.ref)
+
+  def _BacktrackAnnotationStack(self, depth):
+    stack = self._annotation_stack
+    while stack and stack[-1].depth >= depth:
+      stack.pop()
+
+  def _InitOrUpdateCurrentField(self, value):
+    assert self.HasCurrentAnnotation()
+    parent = self._annotation_stack[-2]
+    assert parent.ref is not None
+    # There can be no nested constant element values.
+    assert parent.ftype in [_ELEMENT_ARRAY, _ELEMENT_ANNOTATION]
+    field = self._annotation_stack[-1]
+    if type(value) is str and field.ftype != _ELEMENT_PRIMITIVE:
+      # The value comes from the output parser via
+      # UpdateCurrentAnnotationFieldValue, and should be a value of a constant
+      # element. If it isn't, just skip it.
+      return
+    if parent.ftype == _ELEMENT_ARRAY and field.name >= len(parent.ref):
+      parent.ref.append(value)
+    else:
+      parent.ref[field.name] = value
+
+
+def _GetDepth(prefix):
+  return len(prefix) // _PROGUARD_INDENT_WIDTH
+
+
+def Parse(proguard_output):
+  results = {
+    'classes': [],
+  }
+
+  state = _ParseState()
+
+  for line in proguard_output:
+    line = line.strip('\r\n')
+
+    m = _PROGUARD_CLASS_RE.match(line)
+    if m:
+      results['classes'].append(
+        state.CreateAndInitClass(m.group(1).replace('/', '.')))
+      continue
+
+    if not state.HasCurrentClass():
+      continue
+
+    m = _PROGUARD_SUPERCLASS_RE.match(line)
+    if m:
+      state.SetSuperClass(m.group(1).replace('/', '.'))
+      continue
+
+    m = _PROGUARD_SECTION_RE.match(line)
+    if m:
+      state.ResetPerSection(m.group(1))
+      continue
+
+    m = _PROGUARD_METHOD_RE.match(line)
+    if m:
+      state.InitMethod(m.group(1))
+      continue
+
+    if not state.ParseAnnotations():
+      continue
+
+    m = _PROGUARD_ANNOTATION_RE.match(line)
+    if m:
+      # Ignore the annotation package.
+      state.InitAnnotation(m.group(2).split('/')[-1], _GetDepth(m.group(1)))
+      continue
+
+    if state.HasCurrentAnnotation():
+      m = None
+      for (element_type, element_re) in _PROGUARD_ELEMENT_RES:
+        m = element_re.match(line)
+        if m:
+          state.InitAnnotationField(
+            m.group(2), element_type, _GetDepth(m.group(1)))
+          break
+      if m:
+        continue
+      m = _PROGUARD_ANNOTATION_VALUE_RE.match(line)
+      if m:
+        state.UpdateCurrentAnnotationFieldValue(
+          m.group(2), _GetDepth(m.group(1)))
+      else:
+        state.InitMethod(None)
+
+  return results
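+
+
+if __name__ == '__main__':
+  # Illustrative use of Dump (the jar path below is hypothetical): list each
+  # annotated method in the dumped classes.
+  dumped = Dump('/abs/path/foo.jar')
+  for class_info in dumped['classes']:
+    for method_info in class_info['methods']:
+      if method_info['annotations']:
+        print '%s#%s: %s' % (class_info['class'], method_info['method'],
+                             ', '.join(sorted(method_info['annotations'])))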
diff --git a/build/android/pylib/utils/proguard_test.py b/build/android/pylib/utils/proguard_test.py
new file mode 100644
index 0000000..497e12d
--- /dev/null
+++ b/build/android/pylib/utils/proguard_test.py
@@ -0,0 +1,490 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from pylib.utils import proguard
+
+
+class TestParse(unittest.TestCase):
+
+  def setUp(self):
+    self.maxDiff = None
+
+  def testClass(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       '  Superclass: java/lang/Object'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': 'java.lang.Object',
+          'annotations': {},
+          'methods': []
+        }
+      ]
+    }
+    self.assertEqual(expected, actual)
+
+  def testMethod(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       'Methods (count = 1):',
+       '- Method:       <init>()V'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': '',
+          'annotations': {},
+          'methods': [
+            {
+              'method': '<init>',
+              'annotations': {}
+            }
+          ]
+        }
+      ]
+    }
+    self.assertEqual(expected, actual)
+
+  def testClassAnnotation(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       'Class file attributes (count = 3):',
+       '  - Annotation [Lorg/example/Annotation;]:',
+       '  - Annotation [Lorg/example/AnnotationWithValue;]:',
+       '    - Constant element value [attr \'13\']',
+       '      - Utf8 [val]',
+       '  - Annotation [Lorg/example/AnnotationWithTwoValues;]:',
+       '    - Constant element value [attr1 \'13\']',
+       '      - Utf8 [val1]',
+       '    - Constant element value [attr2 \'13\']',
+       '      - Utf8 [val2]'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': '',
+          'annotations': {
+            'Annotation': None,
+            'AnnotationWithValue': {'attr': 'val'},
+            'AnnotationWithTwoValues': {'attr1': 'val1', 'attr2': 'val2'}
+          },
+          'methods': []
+        }
+      ]
+    }
+    self.assertEqual(expected, actual)
+
+  def testClassAnnotationWithArrays(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       'Class file attributes (count = 3):',
+       '  - Annotation [Lorg/example/AnnotationWithEmptyArray;]:',
+       '    - Array element value [arrayAttr]:',
+       '  - Annotation [Lorg/example/AnnotationWithOneElemArray;]:',
+       '    - Array element value [arrayAttr]:',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [val]',
+       '  - Annotation [Lorg/example/AnnotationWithTwoElemArray;]:',
+       '    - Array element value [arrayAttr]:',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [val1]',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [val2]'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': '',
+          'annotations': {
+            'AnnotationWithEmptyArray': {'arrayAttr': []},
+            'AnnotationWithOneElemArray': {'arrayAttr': ['val']},
+            'AnnotationWithTwoElemArray': {'arrayAttr': ['val1', 'val2']}
+          },
+          'methods': []
+        }
+      ]
+    }
+    self.assertEqual(expected, actual)
+
+  def testNestedClassAnnotations(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       'Class file attributes (count = 1):',
+       '  - Annotation [Lorg/example/OuterAnnotation;]:',
+       '    - Constant element value [outerAttr \'13\']',
+       '      - Utf8 [outerVal]',
+       '    - Array element value [outerArr]:',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [outerArrVal1]',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [outerArrVal2]',
+       '    - Annotation element value [emptyAnn]:',
+       '      - Annotation [Lorg/example/EmptyAnnotation;]:',
+       '    - Annotation element value [ann]:',
+       '      - Annotation [Lorg/example/InnerAnnotation;]:',
+       '        - Constant element value [innerAttr \'13\']',
+       '          - Utf8 [innerVal]',
+       '        - Array element value [innerArr]:',
+       '          - Constant element value [(default) \'13\']',
+       '            - Utf8 [innerArrVal1]',
+       '          - Constant element value [(default) \'13\']',
+       '            - Utf8 [innerArrVal2]',
+       '        - Annotation element value [emptyInnerAnn]:',
+       '          - Annotation [Lorg/example/EmptyAnnotation;]:'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': '',
+          'annotations': {
+            'OuterAnnotation': {
+              'outerAttr': 'outerVal',
+              'outerArr': ['outerArrVal1', 'outerArrVal2'],
+              'emptyAnn': None,
+              'ann': {
+                'innerAttr': 'innerVal',
+                'innerArr': ['innerArrVal1', 'innerArrVal2'],
+                'emptyInnerAnn': None
+              }
+            }
+          },
+          'methods': []
+        }
+      ]
+    }
+    self.assertEqual(expected, actual)
+
+  def testClassArraysOfAnnotations(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       'Class file attributes (count = 1):',
+       '   - Annotation [Lorg/example/OuterAnnotation;]:',
+       '     - Array element value [arrayWithEmptyAnnotations]:',
+       '       - Annotation element value [(default)]:',
+       '         - Annotation [Lorg/example/EmptyAnnotation;]:',
+       '       - Annotation element value [(default)]:',
+       '         - Annotation [Lorg/example/EmptyAnnotation;]:',
+       '     - Array element value [outerArray]:',
+       '       - Annotation element value [(default)]:',
+       '         - Annotation [Lorg/example/InnerAnnotation;]:',
+       '           - Constant element value [innerAttr \'115\']',
+       '             - Utf8 [innerVal]',
+       '           - Array element value [arguments]:',
+       '             - Annotation element value [(default)]:',
+       '               - Annotation [Lorg/example/InnerAnnotation$Argument;]:',
+       '                 - Constant element value [arg1Attr \'115\']',
+       '                   - Utf8 [arg1Val]',
+       '                 - Array element value [arg1Array]:',
+       '                   - Constant element value [(default) \'73\']',
+       '                     - Integer [11]',
+       '                   - Constant element value [(default) \'73\']',
+       '                     - Integer [12]',
+       '             - Annotation element value [(default)]:',
+       '               - Annotation [Lorg/example/InnerAnnotation$Argument;]:',
+       '                 - Constant element value [arg2Attr \'115\']',
+       '                   - Utf8 [arg2Val]',
+       '                 - Array element value [arg2Array]:',
+       '                   - Constant element value [(default) \'73\']',
+       '                     - Integer [21]',
+       '                   - Constant element value [(default) \'73\']',
+       '                     - Integer [22]'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': '',
+          'annotations': {
+            'OuterAnnotation': {
+              'arrayWithEmptyAnnotations': [None, None],
+              'outerArray': [
+                {
+                  'innerAttr': 'innerVal',
+                  'arguments': [
+                    {'arg1Attr': 'arg1Val', 'arg1Array': ['11', '12']},
+                    {'arg2Attr': 'arg2Val', 'arg2Array': ['21', '22']}
+                  ]
+                }
+              ]
+            }
+          },
+          'methods': []
+        }
+      ]
+    }
+    self.assertEqual(expected, actual)
+
+  def testReadFullClassFileAttributes(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       'Class file attributes (count = 3):',
+       '  - Source file attribute:',
+       '    - Utf8 [Class.java]',
+       '  - Runtime visible annotations attribute:',
+       '    - Annotation [Lorg/example/IntValueAnnotation;]:',
+       '      - Constant element value [value \'73\']',
+       '        - Integer [19]',
+       '  - Inner classes attribute (count = 1)',
+       '    - InnerClassesInfo:',
+       '      Access flags:  0x9 = public static',
+       '      - Class [org/example/Class1]',
+       '      - Class [org/example/Class2]',
+       '      - Utf8 [OnPageFinishedHelper]'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': '',
+          'annotations': {
+            'IntValueAnnotation': {
+              'value': '19',
+            }
+          },
+          'methods': []
+        }
+      ]
+    }
+    self.assertEquals(expected, actual)
+
+  def testMethodAnnotation(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       'Methods (count = 1):',
+       '- Method:       Test()V',
+       '  - Annotation [Lorg/example/Annotation;]:',
+       '  - Annotation [Lorg/example/AnnotationWithValue;]:',
+       '    - Constant element value [attr \'13\']',
+       '      - Utf8 [val]',
+       '  - Annotation [Lorg/example/AnnotationWithTwoValues;]:',
+       '    - Constant element value [attr1 \'13\']',
+       '      - Utf8 [val1]',
+       '    - Constant element value [attr2 \'13\']',
+       '      - Utf8 [val2]'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': '',
+          'annotations': {},
+          'methods': [
+            {
+              'method': 'Test',
+              'annotations': {
+                'Annotation': None,
+                'AnnotationWithValue': {'attr': 'val'},
+                'AnnotationWithTwoValues': {'attr1': 'val1', 'attr2': 'val2'}
+              },
+            }
+          ]
+        }
+      ]
+    }
+    self.assertEquals(expected, actual)
+
+  def testMethodAnnotationWithArrays(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       'Methods (count = 1):',
+       '- Method:       Test()V',
+       '  - Annotation [Lorg/example/AnnotationWithEmptyArray;]:',
+       '    - Array element value [arrayAttr]:',
+       '  - Annotation [Lorg/example/AnnotationWithOneElemArray;]:',
+       '    - Array element value [arrayAttr]:',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [val]',
+       '  - Annotation [Lorg/example/AnnotationWithTwoElemArray;]:',
+       '    - Array element value [arrayAttr]:',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [val1]',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [val2]'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': '',
+          'annotations': {},
+          'methods': [
+            {
+              'method': 'Test',
+              'annotations': {
+                'AnnotationWithEmptyArray': {'arrayAttr': []},
+                'AnnotationWithOneElemArray': {'arrayAttr': ['val']},
+                'AnnotationWithTwoElemArray': {'arrayAttr': ['val1', 'val2']}
+              },
+            }
+          ]
+        }
+      ]
+    }
+    self.assertEquals(expected, actual)
+
+  def testMethodAnnotationWithPrimitivesAndArrays(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       'Methods (count = 1):',
+       '- Method:       Test()V',
+       '  - Annotation [Lorg/example/AnnotationPrimitiveThenArray;]:',
+       '    - Constant element value [attr \'13\']',
+       '      - Utf8 [val]',
+       '    - Array element value [arrayAttr]:',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [val]',
+       '  - Annotation [Lorg/example/AnnotationArrayThenPrimitive;]:',
+       '    - Array element value [arrayAttr]:',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [val]',
+       '    - Constant element value [attr \'13\']',
+       '      - Utf8 [val]',
+       '  - Annotation [Lorg/example/AnnotationTwoArrays;]:',
+       '    - Array element value [arrayAttr1]:',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [val1]',
+       '    - Array element value [arrayAttr2]:',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [val2]'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': '',
+          'annotations': {},
+          'methods': [
+            {
+              'method': 'Test',
+              'annotations': {
+                'AnnotationPrimitiveThenArray': {'attr': 'val',
+                                                 'arrayAttr': ['val']},
+                'AnnotationArrayThenPrimitive': {'arrayAttr': ['val'],
+                                                 'attr': 'val'},
+                'AnnotationTwoArrays': {'arrayAttr1': ['val1'],
+                                        'arrayAttr2': ['val2']}
+              },
+            }
+          ]
+        }
+      ]
+    }
+    self.assertEquals(expected, actual)
+
+  def testNestedMethodAnnotations(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       'Methods (count = 1):',
+       '- Method:       Test()V',
+       '  - Annotation [Lorg/example/OuterAnnotation;]:',
+       '    - Constant element value [outerAttr \'13\']',
+       '      - Utf8 [outerVal]',
+       '    - Array element value [outerArr]:',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [outerArrVal1]',
+       '      - Constant element value [(default) \'13\']',
+       '        - Utf8 [outerArrVal2]',
+       '    - Annotation element value [emptyAnn]:',
+       '      - Annotation [Lorg/example/EmptyAnnotation;]:',
+       '    - Annotation element value [ann]:',
+       '      - Annotation [Lorg/example/InnerAnnotation;]:',
+       '        - Constant element value [innerAttr \'13\']',
+       '          - Utf8 [innerVal]',
+       '        - Array element value [innerArr]:',
+       '          - Constant element value [(default) \'13\']',
+       '            - Utf8 [innerArrVal1]',
+       '          - Constant element value [(default) \'13\']',
+       '            - Utf8 [innerArrVal2]',
+       '        - Annotation element value [emptyInnerAnn]:',
+       '          - Annotation [Lorg/example/EmptyAnnotation;]:'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': '',
+          'annotations': {},
+          'methods': [
+            {
+              'method': 'Test',
+              'annotations': {
+                'OuterAnnotation': {
+                  'outerAttr': 'outerVal',
+                  'outerArr': ['outerArrVal1', 'outerArrVal2'],
+                  'emptyAnn': None,
+                  'ann': {
+                    'innerAttr': 'innerVal',
+                    'innerArr': ['innerArrVal1', 'innerArrVal2'],
+                    'emptyInnerAnn': None
+                  }
+                }
+              },
+            }
+          ]
+        }
+      ]
+    }
+    self.assertEquals(expected, actual)
+
+  def testMethodArraysOfAnnotations(self):
+    actual = proguard.Parse(
+      ['- Program class: org/example/Test',
+       'Methods (count = 1):',
+       '- Method:       Test()V',
+       '   - Annotation [Lorg/example/OuterAnnotation;]:',
+       '     - Array element value [arrayWithEmptyAnnotations]:',
+       '       - Annotation element value [(default)]:',
+       '         - Annotation [Lorg/example/EmptyAnnotation;]:',
+       '       - Annotation element value [(default)]:',
+       '         - Annotation [Lorg/example/EmptyAnnotation;]:',
+       '     - Array element value [outerArray]:',
+       '       - Annotation element value [(default)]:',
+       '         - Annotation [Lorg/example/InnerAnnotation;]:',
+       '           - Constant element value [innerAttr \'115\']',
+       '             - Utf8 [innerVal]',
+       '           - Array element value [arguments]:',
+       '             - Annotation element value [(default)]:',
+       '               - Annotation [Lorg/example/InnerAnnotation$Argument;]:',
+       '                 - Constant element value [arg1Attr \'115\']',
+       '                   - Utf8 [arg1Val]',
+       '                 - Array element value [arg1Array]:',
+       '                   - Constant element value [(default) \'73\']',
+       '                     - Integer [11]',
+       '                   - Constant element value [(default) \'73\']',
+       '                     - Integer [12]',
+       '             - Annotation element value [(default)]:',
+       '               - Annotation [Lorg/example/InnerAnnotation$Argument;]:',
+       '                 - Constant element value [arg2Attr \'115\']',
+       '                   - Utf8 [arg2Val]',
+       '                 - Array element value [arg2Array]:',
+       '                   - Constant element value [(default) \'73\']',
+       '                     - Integer [21]',
+       '                   - Constant element value [(default) \'73\']',
+       '                     - Integer [22]'])
+    expected = {
+      'classes': [
+        {
+          'class': 'org.example.Test',
+          'superclass': '',
+          'annotations': {},
+          'methods': [
+            {
+              'method': 'Test',
+              'annotations': {
+                'OuterAnnotation': {
+                  'arrayWithEmptyAnnotations': [None, None],
+                  'outerArray': [
+                    {
+                      'innerAttr': 'innerVal',
+                      'arguments': [
+                        {'arg1Attr': 'arg1Val', 'arg1Array': ['11', '12']},
+                        {'arg2Attr': 'arg2Val', 'arg2Array': ['21', '22']}
+                      ]
+                    }
+                  ]
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+    self.assertEquals(expected, actual)
diff --git a/build/android/pylib/utils/repo_utils.py b/build/android/pylib/utils/repo_utils.py
new file mode 100644
index 0000000..5a0efa8
--- /dev/null
+++ b/build/android/pylib/utils/repo_utils.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from devil.utils import cmd_helper
+
+
+def GetGitHeadSHA1(in_directory):
+  """Returns the git hash tag for the given directory.
+
+  Args:
+    in_directory: The directory where git is to be run.
+  """
+  command_line = ['git', 'log', '-1', '--pretty=format:%H']
+  output = cmd_helper.GetCmdOutput(command_line, cwd=in_directory)
+  return output[0:40]
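+
+
+# Example usage (illustrative path):
+#   sha1 = GetGitHeadSHA1('/path/to/checkout')  # 40-char hex SHA1 of HEAD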
diff --git a/build/android/pylib/utils/reraiser_thread.py b/build/android/pylib/utils/reraiser_thread.py
new file mode 100644
index 0000000..828cd2b
--- /dev/null
+++ b/build/android/pylib/utils/reraiser_thread.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.utils.reraiser_thread import *
diff --git a/build/android/pylib/utils/run_tests_helper.py b/build/android/pylib/utils/run_tests_helper.py
new file mode 100644
index 0000000..5c48668
--- /dev/null
+++ b/build/android/pylib/utils/run_tests_helper.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.utils.run_tests_helper import *
diff --git a/build/android/pylib/utils/test_environment.py b/build/android/pylib/utils/test_environment.py
new file mode 100644
index 0000000..0a1326e
--- /dev/null
+++ b/build/android/pylib/utils/test_environment.py
@@ -0,0 +1,52 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import psutil
+import signal
+
+from devil.android import device_errors
+from devil.android import device_utils
+
+
+def _KillWebServers():
+  for s in [signal.SIGTERM, signal.SIGINT, signal.SIGQUIT, signal.SIGKILL]:
+    signalled = []
+    for server in ['lighttpd', 'webpagereplay']:
+      for p in psutil.process_iter():
+        try:
+          if server not in ' '.join(p.cmdline):
+            continue
+          logging.info('Killing %s %s %s', s, server, p.pid)
+          p.send_signal(s)
+          signalled.append(p)
+        except Exception: # pylint: disable=broad-except
+          logging.exception('Failed killing %s %s', server, p.pid)
+    for p in signalled:
+      try:
+        p.wait(1)
+      except Exception: # pylint: disable=broad-except
+        logging.exception('Failed waiting for %s to die.', p.pid)
+
+
+def CleanupLeftoverProcesses(devices):
+  """Clean up the test environment, restarting fresh adb and HTTP daemons.
+
+  Args:
+    devices: The devices to clean.
+  """
+  _KillWebServers()
+  device_utils.RestartServer()
+
+  def cleanup_device(d):
+    d.WaitUntilFullyBooted()
+    d.RestartAdbd()
+    try:
+      d.EnableRoot()
+    except device_errors.CommandFailedError:
+      logging.exception('Failed to enable root')
+    d.WaitUntilFullyBooted()
+
+  device_utils.DeviceUtils.parallel(devices).pMap(cleanup_device)
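+
+
+# Example usage (illustrative; HealthyDevices() is devil's helper for listing
+# attached, usable devices):
+#   CleanupLeftoverProcesses(device_utils.DeviceUtils.HealthyDevices())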
diff --git a/build/android/pylib/utils/time_profile.py b/build/android/pylib/utils/time_profile.py
new file mode 100644
index 0000000..094799c
--- /dev/null
+++ b/build/android/pylib/utils/time_profile.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+
+
+class TimeProfile(object):
+  """Class for simple profiling of action, with logging of cost."""
+
+  def __init__(self, description='operation'):
+    self._starttime = None
+    self._endtime = None
+    self._description = description
+    self.Start()
+
+  def Start(self):
+    self._starttime = time.time()
+    self._endtime = None
+
+  def GetDelta(self):
+    """Returns the rounded delta.
+
+    Also stops the timer if Stop() has not already been called.
+    """
+    if self._endtime is None:
+      self.Stop(log=False)
+    delta = self._endtime - self._starttime
+    delta = round(delta, 2) if delta < 10 else round(delta, 1)
+    return delta
+
+  def LogResult(self):
+    """Logs the result."""
+    logging.info('%s seconds to perform %s', self.GetDelta(), self._description)
+
+  def Stop(self, log=True):
+    """Stop profiling.
+
+    Args:
+      log: Log the delta (defaults to true).
+    """
+    self._endtime = time.time()
+    if log:
+      self.LogResult()
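+
+
+# Example usage (illustrative):
+#   timer = TimeProfile('device flash')  # timing starts in the constructor
+#   FlashDevice()                        # hypothetical operation being timed
+#   timer.Stop()  # logs e.g. "3.42 seconds to perform device flash"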
diff --git a/build/android/pylib/utils/timeout_retry.py b/build/android/pylib/utils/timeout_retry.py
new file mode 100644
index 0000000..e566f45
--- /dev/null
+++ b/build/android/pylib/utils/timeout_retry.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.utils.timeout_retry import *
diff --git a/build/android/pylib/utils/watchdog_timer.py b/build/android/pylib/utils/watchdog_timer.py
new file mode 100644
index 0000000..967794c
--- /dev/null
+++ b/build/android/pylib/utils/watchdog_timer.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.utils.watchdog_timer import *
diff --git a/build/android/pylib/utils/xvfb.py b/build/android/pylib/utils/xvfb.py
new file mode 100644
index 0000000..cb9d50e
--- /dev/null
+++ b/build/android/pylib/utils/xvfb.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=W0702
+
+import os
+import signal
+import subprocess
+import sys
+import time
+
+
+def _IsLinux():
+  """Return True if on Linux; else False."""
+  return sys.platform.startswith('linux')
+
+
+class Xvfb(object):
+  """Class to start and stop Xvfb if relevant.  Nop if not Linux."""
+
+  def __init__(self):
+    self._pid = 0
+
+  def Start(self):
+    """Start Xvfb and set an appropriate DISPLAY environment.  Linux only.
+
+    Copied from tools/code_coverage/coverage_posix.py
+    """
+    if not _IsLinux():
+      return
+    proc = subprocess.Popen(['Xvfb', ':9', '-screen', '0', '1024x768x24',
+                             '-ac'],
+                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    self._pid = proc.pid
+    if not self._pid:
+      raise Exception('Could not start Xvfb')
+    os.environ['DISPLAY'] = ':9'
+
+    # Now confirm, giving a chance for it to start if needed.
+    for _ in range(10):
+      proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
+      _, retcode = os.waitpid(proc.pid, 0)
+      if retcode == 0:
+        break
+      time.sleep(0.25)
+    if retcode != 0:
+      raise Exception('Could not confirm Xvfb happiness')
+
+  def Stop(self):
+    """Stop Xvfb if needed.  Linux only."""
+    if self._pid:
+      try:
+        os.kill(self._pid, signal.SIGKILL)
+      except:
+        pass
+      del os.environ['DISPLAY']
+      self._pid = 0
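+
+
+# Example usage (illustrative):
+#   xvfb = Xvfb()
+#   xvfb.Start()  # no-op on non-Linux hosts
+#   try:
+#     RunTestsThatNeedADisplay()  # hypothetical test entry point
+#   finally:
+#     xvfb.Stop()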
diff --git a/build/android/pylib/utils/zip_utils.py b/build/android/pylib/utils/zip_utils.py
new file mode 100644
index 0000000..007b34b
--- /dev/null
+++ b/build/android/pylib/utils/zip_utils.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=unused-wildcard-import
+# pylint: disable=wildcard-import
+
+from devil.utils.zip_utils import *
diff --git a/build/android/pylib/valgrind_tools.py b/build/android/pylib/valgrind_tools.py
new file mode 100644
index 0000000..8142893
--- /dev/null
+++ b/build/android/pylib/valgrind_tools.py
@@ -0,0 +1,235 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# pylint: disable=R0201
+
+import glob
+import logging
+import os.path
+import subprocess
+import sys
+
+from devil.android import device_errors
+from devil.android.valgrind_tools import base_tool
+from pylib.constants import DIR_SOURCE_ROOT
+
+
+def SetChromeTimeoutScale(device, scale):
+  """Sets the timeout scale in /data/local/tmp/chrome_timeout_scale to scale."""
+  path = '/data/local/tmp/chrome_timeout_scale'
+  if not scale or scale == 1.0:
+    # Delete if scale is None/0.0/1.0 since the default timeout scale is 1.0
+    device.RunShellCommand('rm %s' % path)
+  else:
+    device.WriteFile(path, '%f' % scale, as_root=True)
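+
+# Example (illustrative): SetChromeTimeoutScale(device, 20.0) writes
+# "20.000000" to the file above, while SetChromeTimeoutScale(device, None)
+# deletes it so the default scale of 1.0 applies.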
+
+
+class AddressSanitizerTool(base_tool.BaseTool):
+  """AddressSanitizer tool."""
+
+  WRAPPER_NAME = '/system/bin/asanwrapper'
+  # Disable the memcmp overlap check. There are blobs (GL drivers)
+  # on some Android devices that use memcmp on overlapping regions;
+  # nothing we can do about that.
+  EXTRA_OPTIONS = 'strict_memcmp=0,use_sigaltstack=1'
+
+  def __init__(self, device):
+    super(AddressSanitizerTool, self).__init__()
+    self._device = device
+
+  @classmethod
+  def CopyFiles(cls, device):
+    """Copies ASan tools to the device."""
+    libs = glob.glob(os.path.join(DIR_SOURCE_ROOT,
+                                  'third_party/llvm-build/Release+Asserts/',
+                                  'lib/clang/*/lib/linux/',
+                                  'libclang_rt.asan-arm-android.so'))
+    assert len(libs) == 1
+    subprocess.call(
+        [os.path.join(
+             DIR_SOURCE_ROOT,
+             'tools/android/asan/third_party/asan_device_setup.sh'),
+         '--device', str(device),
+         '--lib', libs[0],
+         '--extra-options', AddressSanitizerTool.EXTRA_OPTIONS])
+    device.WaitUntilFullyBooted()
+
+  def GetTestWrapper(self):
+    return AddressSanitizerTool.WRAPPER_NAME
+
+  def GetUtilWrapper(self):
+    """Returns the wrapper for utilities, such as forwarder.
+
+    AddressSanitizer wrapper must be added to all instrumented binaries,
+    including forwarder and the like. This can be removed if such binaries
+    were built without instrumentation."""
+    return self.GetTestWrapper()
+
+  def SetupEnvironment(self):
+    try:
+      self._device.EnableRoot()
+    except device_errors.CommandFailedError as e:
+      # Try to set the timeout scale anyway.
+      # TODO(jbudorick) Handle this exception appropriately after interface
+      #                 conversions are finished.
+      logging.error(str(e))
+    SetChromeTimeoutScale(self._device, self.GetTimeoutScale())
+
+  def CleanUpEnvironment(self):
+    SetChromeTimeoutScale(self._device, None)
+
+  def GetTimeoutScale(self):
+    # Very slow startup.
+    return 20.0
+
+
+class ValgrindTool(base_tool.BaseTool):
+  """Base abstract class for Valgrind tools."""
+
+  VG_DIR = '/data/local/tmp/valgrind'
+  VGLOGS_DIR = '/data/local/tmp/vglogs'
+
+  def __init__(self, device):
+    super(ValgrindTool, self).__init__()
+    self._device = device
+    # exactly 31 chars, SystemProperties::PROP_NAME_MAX
+    self._wrap_properties = ['wrap.com.google.android.apps.ch',
+                             'wrap.org.chromium.native_test']
+
+  @classmethod
+  def CopyFiles(cls, device):
+    """Copies Valgrind tools to the device."""
+    device.RunShellCommand(
+        'rm -r %s; mkdir %s' % (ValgrindTool.VG_DIR, ValgrindTool.VG_DIR))
+    device.RunShellCommand(
+        'rm -r %s; mkdir %s' % (ValgrindTool.VGLOGS_DIR,
+                                ValgrindTool.VGLOGS_DIR))
+    files = cls.GetFilesForTool()
+    device.PushChangedFiles(
+        [(os.path.join(DIR_SOURCE_ROOT, f),
+          os.path.join(ValgrindTool.VG_DIR, os.path.basename(f)))
+         for f in files])
+
+  def SetupEnvironment(self):
+    """Sets up device environment."""
+    self._device.RunShellCommand('chmod 777 /data/local/tmp')
+    self._device.RunShellCommand('setenforce 0')
+    for prop in self._wrap_properties:
+      self._device.RunShellCommand(
+          'setprop %s "logwrapper %s"' % (prop, self.GetTestWrapper()))
+    SetChromeTimeoutScale(self._device, self.GetTimeoutScale())
+
+  def CleanUpEnvironment(self):
+    """Cleans up device environment."""
+    for prop in self._wrap_properties:
+      self._device.RunShellCommand('setprop %s ""' % (prop,))
+    SetChromeTimeoutScale(self._device, None)
+
+  @staticmethod
+  def GetFilesForTool():
+    """Returns a list of file names for the tool."""
+    raise NotImplementedError()
+
+  def NeedsDebugInfo(self):
+    """Whether this tool requires debug info.
+
+    Returns:
+      True if this tool can not work with stripped binaries.
+    """
+    return True
+
+
+class MemcheckTool(ValgrindTool):
+  """Memcheck tool."""
+
+  def __init__(self, device):
+    super(MemcheckTool, self).__init__(device)
+
+  @staticmethod
+  def GetFilesForTool():
+    """Returns a list of file names for the tool."""
+    return ['tools/valgrind/android/vg-chrome-wrapper.sh',
+            'tools/valgrind/memcheck/suppressions.txt',
+            'tools/valgrind/memcheck/suppressions_android.txt']
+
+  def GetTestWrapper(self):
+    """Returns a string that is to be prepended to the test command line."""
+    return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper.sh'
+
+  def GetTimeoutScale(self):
+    """Returns a multiplier that should be applied to timeout values."""
+    return 30
+
+
+class TSanTool(ValgrindTool):
+  """ThreadSanitizer tool. See http://code.google.com/p/data-race-test ."""
+
+  def __init__(self, device):
+    super(TSanTool, self).__init__(device)
+
+  @staticmethod
+  def GetFilesForTool():
+    """Returns a list of file names for the tool."""
+    return ['tools/valgrind/android/vg-chrome-wrapper-tsan.sh',
+            'tools/valgrind/tsan/suppressions.txt',
+            'tools/valgrind/tsan/suppressions_android.txt',
+            'tools/valgrind/tsan/ignores.txt']
+
+  def GetTestWrapper(self):
+    """Returns a string that is to be prepended to the test command line."""
+    return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper-tsan.sh'
+
+  def GetTimeoutScale(self):
+    """Returns a multiplier that should be applied to timeout values."""
+    return 30.0
+
+
+TOOL_REGISTRY = {
+    'memcheck': MemcheckTool,
+    'memcheck-renderer': MemcheckTool,
+    'tsan': TSanTool,
+    'tsan-renderer': TSanTool,
+    'asan': AddressSanitizerTool,
+}
+
+
+def CreateTool(tool_name, device):
+  """Creates a tool with the specified tool name.
+
+  Args:
+    tool_name: Name of the tool to create.
+    device: A DeviceUtils instance.
+  Returns:
+    A tool for the specified tool_name.
+  """
+  if not tool_name:
+    return base_tool.BaseTool()
+
+  ctor = TOOL_REGISTRY.get(tool_name)
+  if ctor:
+    return ctor(device)
+  else:
+    print 'Unknown tool %s, available tools: %s' % (
+        tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))
+    sys.exit(1)
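+
+# Example usage (illustrative):
+#   tool = CreateTool('asan', device)  # returns an AddressSanitizerTool
+#   tool.SetupEnvironment()
+#   # ... run tests wrapped with tool.GetTestWrapper() ...
+#   tool.CleanUpEnvironment()
+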
+
+def PushFilesForTool(tool_name, device):
+  """Pushes the files required for |tool_name| to |device|.
+
+  Args:
+    tool_name: Name of the tool to create.
+    device: A DeviceUtils instance.
+  """
+  if not tool_name:
+    return
+
+  clazz = TOOL_REGISTRY.get(tool_name)
+  if clazz:
+    clazz.CopyFiles(device)
+  else:
+    print 'Unknown tool %s, available tools: %s' % (
+        tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))
+    sys.exit(1)
diff --git a/build/android/pylintrc b/build/android/pylintrc
new file mode 100644
index 0000000..8005a5d
--- /dev/null
+++ b/build/android/pylintrc
@@ -0,0 +1,15 @@
+[FORMAT]
+
+max-line-length=80
+
+[MESSAGES CONTROL]
+
+disable=abstract-class-not-used,bad-continuation,bad-indentation,duplicate-code,fixme,invalid-name,locally-disabled,locally-enabled,missing-docstring,star-args,too-few-public-methods,too-many-arguments,too-many-branches,too-many-instance-attributes,too-many-lines,too-many-locals,too-many-public-methods,too-many-statements,
+
+[REPORTS]
+
+reports=no
+
+[VARIABLES]
+
+dummy-variables-rgx=^_.*$|dummy
diff --git a/build/android/resource_sizes.py b/build/android/resource_sizes.py
new file mode 100755
index 0000000..1995d1e
--- /dev/null
+++ b/build/android/resource_sizes.py
@@ -0,0 +1,458 @@
+#!/usr/bin/python
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Prints the size of each given file and optionally computes the size of
+   libchrome.so without the dependencies added for building with android NDK.
+   Also breaks down the contents of the APK to determine the installed size
+   and assign size contributions to different classes of file.
+"""
+
+import collections
+import json
+import logging
+import operator
+import optparse
+import os
+import re
+import struct
+import sys
+import tempfile
+import zipfile
+import zlib
+
+import devil_chromium
+from devil.utils import cmd_helper
+from pylib import constants
+from pylib.constants import host_paths
+
+_GRIT_PATH = os.path.join(host_paths.DIR_SOURCE_ROOT, 'tools', 'grit')
+
+with host_paths.SysPath(_GRIT_PATH):
+  from grit.format import data_pack # pylint: disable=import-error
+
+with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
+  import perf_tests_results_helper # pylint: disable=import-error
+
+# Python had a bug in zipinfo parsing that triggers on ChromeModern.apk
+# https://bugs.python.org/issue14315
+def _PatchedDecodeExtra(self):
+  # Try to decode the extra field.
+  extra = self.extra
+  unpack = struct.unpack
+  while len(extra) >= 4:
+    tp, ln = unpack('<HH', extra[:4])
+    if tp == 1:
+      if ln >= 24:
+        counts = unpack('<QQQ', extra[4:28])
+      elif ln == 16:
+        counts = unpack('<QQ', extra[4:20])
+      elif ln == 8:
+        counts = unpack('<Q', extra[4:12])
+      elif ln == 0:
+        counts = ()
+      else:
+        raise RuntimeError('Corrupt extra field %s' % (ln,))
+
+      idx = 0
+
+      # ZIP64 extension (large files and/or large archives)
+      if self.file_size in (0xffffffffffffffffL, 0xffffffffL):
+        self.file_size = counts[idx]
+        idx += 1
+
+      if self.compress_size == 0xFFFFFFFFL:
+        self.compress_size = counts[idx]
+        idx += 1
+
+      if self.header_offset == 0xffffffffL:
+        self.header_offset = counts[idx]
+        idx += 1
+
+    extra = extra[ln + 4:]
+
+zipfile.ZipInfo._decodeExtra = (  # pylint: disable=protected-access
+    _PatchedDecodeExtra)
+
+# Static initializers expected in official builds. Note that this list is built
+# using 'nm' on libchrome.so which results from a GCC official build (i.e.
+# Clang is not supported currently).
+
+_BASE_CHART = {
+    'format_version': '0.1',
+    'benchmark_name': 'resource_sizes',
+    'benchmark_description': 'APK resource size information.',
+    'trace_rerun_options': [],
+    'charts': {}
+}
+_DUMP_STATIC_INITIALIZERS_PATH = os.path.join(
+    host_paths.DIR_SOURCE_ROOT, 'tools', 'linux', 'dump-static-initializers.py')
+_RC_HEADER_RE = re.compile(r'^#define (?P<name>\w+) (?P<id>\d+)$')
+
+
+def CountStaticInitializers(so_path):
+  def get_elf_section_size(readelf_stdout, section_name):
+    # Matches: .ctors PROGBITS 000000000516add0 5169dd0 000010 00 WA 0 0 8
+    match = re.search(r'\.%s.*$' % re.escape(section_name),
+                      readelf_stdout, re.MULTILINE)
+    if not match:
+      return (False, -1)
+    size_str = re.split(r'\W+', match.group(0))[5]
+    return (True, int(size_str, 16))
+
+  # Find the number of files with at least one static initializer.
+  # First determine if we're 32 or 64 bit
+  stdout = cmd_helper.GetCmdOutput(['readelf', '-h', so_path])
+  elf_class_line = re.search('Class:.*$', stdout, re.MULTILINE).group(0)
+  elf_class = re.split(r'\W+', elf_class_line)[1]
+  if elf_class == 'ELF32':
+    word_size = 4
+  else:
+    word_size = 8
+
+  # Then find the number of files with global static initializers.
+  # NOTE: this is very implementation-specific and makes assumptions
+  # about how compiler and linker implement global static initializers.
+  si_count = 0
+  stdout = cmd_helper.GetCmdOutput(['readelf', '-SW', so_path])
+  has_init_array, init_array_size = get_elf_section_size(stdout, 'init_array')
+  if has_init_array:
+    si_count = init_array_size / word_size
+  si_count = max(si_count, 0)
+  return si_count
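+
+# Example (illustrative): for a 64-bit ELF, an .init_array section of 0x50
+# (80) bytes corresponds to 80 / 8 = 10 static initializer entries.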
+
+
+def GetStaticInitializers(so_path):
+  output = cmd_helper.GetCmdOutput([_DUMP_STATIC_INITIALIZERS_PATH, '-d',
+                                    so_path])
+  return output.splitlines()
+
+
+def ReportPerfResult(chart_data, graph_title, trace_title, value, units,
+                     improvement_direction='down', important=True):
+  """Outputs test results in correct format.
+
+  If chart_data is None, it outputs data in old format. If chart_data is a
+  dictionary, formats in chartjson format. If any other format defaults to
+  old format.
+  """
+  if chart_data and isinstance(chart_data, dict):
+    chart_data['charts'].setdefault(graph_title, {})
+    chart_data['charts'][graph_title][trace_title] = {
+        'type': 'scalar',
+        'value': value,
+        'units': units,
+        'improvement_direction': improvement_direction,
+        'important': important
+    }
+  else:
+    perf_tests_results_helper.PrintPerfResult(
+        graph_title, trace_title, [value], units)
+
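+# Example (illustrative, assuming chart is a copy of _BASE_CHART):
+#   ReportPerfResult(chart, 'ResourceSizes', 'base.apk size', 12345, 'bytes')
+# records chart['charts']['ResourceSizes']['base.apk size'] as
+#   {'type': 'scalar', 'value': 12345, 'units': 'bytes',
+#    'improvement_direction': 'down', 'important': True}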
+
+def PrintResourceSizes(files, chartjson=None):
+  """Prints the sizes of each given file.
+
+     Args:
+       files: List of files to print sizes for.
+  """
+  for f in files:
+    ReportPerfResult(chartjson, 'ResourceSizes', os.path.basename(f) + ' size',
+                     os.path.getsize(f), 'bytes')
+
+
+def PrintApkAnalysis(apk_filename, chartjson=None):
+  """Analyse APK to determine size contributions of different file classes."""
+  # Define a named tuple type for file grouping.
+  # name: Human readable name for this file group
+  # regex: Regular expression to match filename
+  # extracted: Function that takes a file name and returns whether the file is
+  #            extracted from the apk at install/runtime.
+  FileGroup = collections.namedtuple('FileGroup',
+                                     ['name', 'regex', 'extracted'])
+
+  # File groups are checked in sequence, so more specific regexes should be
+  # earlier in the list.
+  YES = lambda _: True
+  NO = lambda _: False
+  FILE_GROUPS = (
+      FileGroup('Native code', r'\.so$', lambda f: 'crazy' not in f),
+      FileGroup('Java code', r'\.dex$', YES),
+      FileGroup('Native resources (no l10n)', r'\.pak$', NO),
+      # For locale paks, assume only english paks are extracted.
+      FileGroup('Native resources (l10n)', r'\.lpak$', lambda f: 'en_' in f),
+      FileGroup('ICU (i18n library) data', r'assets/icudtl\.dat$', NO),
+      FileGroup('V8 Snapshots', r'\.bin$', NO),
+      FileGroup('PNG drawables', r'\.png$', NO),
+      FileGroup('Non-compiled Android resources', r'^res/', NO),
+      FileGroup('Compiled Android resources', r'\.arsc$', NO),
+      FileGroup('Package metadata', r'^(META-INF/|AndroidManifest\.xml$)', NO),
+      FileGroup('Unknown files', r'.', NO),
+      )
+
+  apk = zipfile.ZipFile(apk_filename, 'r')
+  try:
+    apk_contents = apk.infolist()
+  finally:
+    apk.close()
+
+  total_apk_size = os.path.getsize(apk_filename)
+  apk_basename = os.path.basename(apk_filename)
+
+  found_files = {}
+  for group in FILE_GROUPS:
+    found_files[group] = []
+
+  for member in apk_contents:
+    for group in FILE_GROUPS:
+      if re.search(group.regex, member.filename):
+        found_files[group].append(member)
+        break
+    else:
+      raise KeyError('No group found for file "%s"' % member.filename)
+
+  total_install_size = total_apk_size
+
+  for group in FILE_GROUPS:
+    apk_size = sum(member.compress_size for member in found_files[group])
+    install_size = apk_size
+    install_bytes = sum(f.file_size for f in found_files[group]
+                        if group.extracted(f.filename))
+    install_size += install_bytes
+    total_install_size += install_bytes
+
+    ReportPerfResult(chartjson, apk_basename + '_Breakdown',
+                     group.name + ' size', apk_size, 'bytes')
+    ReportPerfResult(chartjson, apk_basename + '_InstallBreakdown',
+                     group.name + ' size', install_size, 'bytes')
+
+  transfer_size = _CalculateCompressedSize(apk_filename)
+  ReportPerfResult(chartjson, apk_basename + '_InstallSize',
+                   'Estimated installed size', total_install_size, 'bytes')
+  ReportPerfResult(chartjson, apk_basename + '_InstallSize', 'APK size',
+                   total_apk_size, 'bytes')
+  ReportPerfResult(chartjson, apk_basename + '_TransferSize',
+                   'Transfer size (deflate)', transfer_size, 'bytes')
+
+
+def IsPakFileName(file_name):
+  """Returns whether the given file name ends with .pak or .lpak."""
+  return file_name.endswith('.pak') or file_name.endswith('.lpak')
+
+
+def PrintPakAnalysis(apk_filename, min_pak_resource_size):
+  """Print sizes of all resources in all pak files in |apk_filename|."""
+  print
+  print 'Analyzing pak files in %s...' % apk_filename
+
+  # A structure for holding details about a pak file.
+  Pak = collections.namedtuple(
+      'Pak', ['filename', 'compress_size', 'file_size', 'resources'])
+
+  # Build a list of Pak objects for each pak file.
+  paks = []
+  apk = zipfile.ZipFile(apk_filename, 'r')
+  try:
+    for i in (x for x in apk.infolist() if IsPakFileName(x.filename)):
+      with tempfile.NamedTemporaryFile() as f:
+        f.write(apk.read(i.filename))
+        f.flush()
+        paks.append(Pak(i.filename, i.compress_size, i.file_size,
+                        data_pack.DataPack.ReadDataPack(f.name).resources))
+  finally:
+    apk.close()
+
+  # Output the overall pak file summary.
+  total_files = len(paks)
+  total_compress_size = sum(pak.compress_size for pak in paks)
+  total_file_size = sum(pak.file_size for pak in paks)
+  print 'Total pak files: %d' % total_files
+  print 'Total compressed size: %s' % _FormatBytes(total_compress_size)
+  print 'Total uncompressed size: %s' % _FormatBytes(total_file_size)
+  print
+
+  # Output the table of details about all pak files.
+  print '%25s%11s%21s%21s' % (
+      'FILENAME', 'RESOURCES', 'COMPRESSED SIZE', 'UNCOMPRESSED SIZE')
+  for pak in sorted(paks, key=operator.attrgetter('file_size'), reverse=True):
+    print '%25s %10s %12s %6.2f%% %12s %6.2f%%' % (
+        pak.filename,
+        len(pak.resources),
+        _FormatBytes(pak.compress_size),
+        100.0 * pak.compress_size / total_compress_size,
+        _FormatBytes(pak.file_size),
+        100.0 * pak.file_size / total_file_size)
+
+  print
+  print 'Analyzing pak resources in %s...' % apk_filename
+
+  # Calculate aggregate stats about resources across pak files.
+  resource_count_map = collections.defaultdict(int)
+  resource_size_map = collections.defaultdict(int)
+  resource_overhead_bytes = 6
+  for pak in paks:
+    for r in pak.resources:
+      resource_count_map[r] += 1
+      resource_size_map[r] += len(pak.resources[r]) + resource_overhead_bytes
+
+  # Output the overall resource summary.
+  total_resource_size = sum(resource_size_map.values())
+  total_resource_count = len(resource_count_map)
+  assert total_resource_size <= total_file_size
+  print 'Total pak resources: %s' % total_resource_count
+  print 'Total uncompressed resource size: %s' % _FormatBytes(
+      total_resource_size)
+  print
+
+  resource_id_name_map = _GetResourceIdNameMap()
+
+  # Output the table of details about all resources across pak files.
+  print
+  print '%56s %5s %17s' % ('RESOURCE', 'COUNT', 'UNCOMPRESSED SIZE')
+  for i in sorted(resource_size_map, key=resource_size_map.get,
+                  reverse=True):
+    if resource_size_map[i] >= min_pak_resource_size:
+      print '%56s %5s %9s %6.2f%%' % (
+          resource_id_name_map.get(i, i),
+          resource_count_map[i],
+          _FormatBytes(resource_size_map[i]),
+          100.0 * resource_size_map[i] / total_resource_size)
+
+
+def _GetResourceIdNameMap():
+  """Returns a map of {resource_id: resource_name}."""
+  out_dir = constants.GetOutDirectory()
+  assert os.path.isdir(out_dir), 'Failed to locate out dir at %s' % out_dir
+  print 'Looking at resources in: %s' % out_dir
+
+  grit_headers = []
+  for root, _, files in os.walk(out_dir):
+    if root.endswith('grit'):
+      grit_headers += [os.path.join(root, f) for f in files if f.endswith('.h')]
+  assert grit_headers, 'Failed to find grit headers in %s' % out_dir
+
+  id_name_map = {}
+  for header in grit_headers:
+    with open(header, 'r') as f:
+      for line in f.readlines():
+        m = _RC_HEADER_RE.match(line.strip())
+        if m:
+          i = int(m.group('id'))
+          name = m.group('name')
+          if i in id_name_map and name != id_name_map[i]:
+            print 'WARNING: Resource ID conflict %s (%s vs %s)' % (
+                i, id_name_map[i], name)
+          id_name_map[i] = name
+  return id_name_map
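+
+# Example (illustrative): a grit header line such as
+#   #define IDS_PRODUCT_NAME 28531
+# yields the map entry {28531: 'IDS_PRODUCT_NAME'}.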
+
+
+def PrintStaticInitializersCount(so_with_symbols_path, chartjson=None):
+  """Emits the performance result for static initializers found in the provided
+     shared library. Additionally, files for which static initializers were
+     found are printed on the standard output.
+
+     Args:
+       so_with_symbols_path: Path to the unstripped libchrome.so file.
+  """
+  # GetStaticInitializers uses dump-static-initializers.py to get a list of all
+  # static initializers. This does not work on all archs (particularly arm).
+  # TODO(rnephew): Get rid of warning when crbug.com/585588 is fixed.
+  si_count = CountStaticInitializers(so_with_symbols_path)
+  static_initializers = GetStaticInitializers(so_with_symbols_path)
+  if si_count != len(static_initializers):
+    print ('There are %d files with static initializers, but '
+           'dump-static-initializers found %d:' %
+           (si_count, len(static_initializers)))
+  else:
+    print 'Found %d files with static initializers:' % si_count
+  print '\n'.join(static_initializers)
+
+  ReportPerfResult(chartjson, 'StaticInitializersCount', 'count',
+                   si_count, 'count')
+
+def _FormatBytes(byts):
+  """Pretty-print a number of bytes."""
+  if byts > 2**20.0:
+    byts /= 2**20.0
+    return '%.2fm' % byts
+  if byts > 2**10.0:
+    byts /= 2**10.0
+    return '%.2fk' % byts
+  return str(byts)
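+
+# Examples (illustrative): _FormatBytes(3 * 2**20) == '3.00m',
+# _FormatBytes(1536) == '1.50k' and _FormatBytes(512) == '512'.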
+
+
+def _CalculateCompressedSize(file_path):
+  CHUNK_SIZE = 256 * 1024
+  compressor = zlib.compressobj()
+  total_size = 0
+  with open(file_path, 'rb') as f:
+    for chunk in iter(lambda: f.read(CHUNK_SIZE), ''):
+      total_size += len(compressor.compress(chunk))
+  total_size += len(compressor.flush())
+  return total_size
+
+
+def main(argv):
+  usage = """Usage: %prog [options] file1 file2 ...
+
+Pass any number of files to graph their sizes. Any files with the extension
+'.apk' will be broken down into their components on a separate graph."""
+  option_parser = optparse.OptionParser(usage=usage)
+  option_parser.add_option('--so-path', help='Path to libchrome.so.')
+  option_parser.add_option('--so-with-symbols-path',
+                           help='Path to libchrome.so with symbols.')
+  option_parser.add_option('--min-pak-resource-size', type='int',
+                           default=20*1024,
+                           help='Minimum byte size of displayed pak resources.')
+  option_parser.add_option('--build_type', dest='build_type', default='Debug',
+                           help='Sets the build type, default is Debug.')
+  option_parser.add_option('--chromium-output-directory',
+                           help='Location of the build artifacts. '
+                                'Takes precedence over --build_type.')
+  option_parser.add_option('--chartjson', action="store_true",
+                           help='Sets output mode to chartjson.')
+  option_parser.add_option('--output-dir', default='.',
+                           help='Directory to save chartjson to.')
+  option_parser.add_option('-d', '--device',
+                           help='Dummy option for perf runner.')
+  options, args = option_parser.parse_args(argv)
+  files = args[1:]
+  chartjson = _BASE_CHART.copy() if options.chartjson else None
+
+  constants.SetBuildType(options.build_type)
+  if options.chromium_output_directory:
+    constants.SetOutputDirectory(options.chromium_output_directory)
+  constants.CheckOutputDirectory()
+
+  # For backward compatibility with buildbot scripts, treat --so-path as just
+  # another file to print the size of. We don't need it for anything special
+  # anymore.
+  if options.so_path:
+    files.append(options.so_path)
+
+  if not files:
+    option_parser.error('Must specify a file')
+
+  devil_chromium.Initialize()
+
+  if options.so_with_symbols_path:
+    PrintStaticInitializersCount(
+        options.so_with_symbols_path, chartjson=chartjson)
+
+  PrintResourceSizes(files, chartjson=chartjson)
+
+  for f in files:
+    if f.endswith('.apk'):
+      PrintApkAnalysis(f, chartjson=chartjson)
+      PrintPakAnalysis(f, options.min_pak_resource_size)
+
+  if chartjson:
+    results_path = os.path.join(options.output_dir, 'results-chart.json')
+    logging.critical('Dumping json to %s', results_path)
+    with open(results_path, 'w') as json_file:
+      json.dump(chartjson, json_file)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/build/android/rezip.gyp b/build/android/rezip.gyp
new file mode 100644
index 0000000..dcb71a1
--- /dev/null
+++ b/build/android/rezip.gyp
@@ -0,0 +1,44 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Build the rezip build tool.
+{
+  'targets': [
+    {
+      # GN: //build/android/rezip:rezip
+      'target_name': 'rezip_apk_jar',
+      'type': 'none',
+      'variables': {
+        'java_in_dir': 'rezip',
+        'compile_stamp': '<(SHARED_INTERMEDIATE_DIR)/<(_target_name)/compile.stamp',
+        'javac_jar_path': '<(PRODUCT_DIR)/lib.java/rezip_apk.jar',
+      },
+      'actions': [
+        {
+          'action_name': 'javac_<(_target_name)',
+          'message': 'Compiling <(_target_name) java sources',
+          'variables': {
+            'java_sources': ['>!@(find >(java_in_dir) -name "*.java")'],
+          },
+          'inputs': [
+            '<(DEPTH)/build/android/gyp/util/build_utils.py',
+            '<(DEPTH)/build/android/gyp/javac.py',
+            '>@(java_sources)',
+          ],
+          'outputs': [
+            '<(compile_stamp)',
+            '<(javac_jar_path)',
+          ],
+          'action': [
+            'python', '<(DEPTH)/build/android/gyp/javac.py',
+            '--classpath=',
+            '--jar-path=<(javac_jar_path)',
+            '--stamp=<(compile_stamp)',
+            '>@(java_sources)',
+          ]
+        },
+      ],
+    }
+  ],
+}
diff --git a/build/android/rezip/BUILD.gn b/build/android/rezip/BUILD.gn
new file mode 100644
index 0000000..b9a39a6
--- /dev/null
+++ b/build/android/rezip/BUILD.gn
@@ -0,0 +1,11 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/android/rules.gni")
+
+# GYP: //build/android/rezip.gyp:rezip_apk_jar
+java_library("rezip") {
+  jar_path = "$root_build_dir/lib.java/rezip_apk.jar"
+  java_files = [ "RezipApk.java" ]
+}
diff --git a/build/android/rezip/RezipApk.java b/build/android/rezip/RezipApk.java
new file mode 100644
index 0000000..43d7544
--- /dev/null
+++ b/build/android/rezip/RezipApk.java
@@ -0,0 +1,448 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.jar.JarEntry;
+import java.util.jar.JarFile;
+import java.util.jar.JarOutputStream;
+import java.util.regex.Pattern;
+import java.util.zip.CRC32;
+
+/**
+ * Command line tool used to build APKs which support loading the native code library
+ * directly from the APK file. To construct the APK we rename the native library by
+ * adding the prefix "crazy." to the filename. This is done to prevent the Android
+ * Package Manager from extracting the library. The native code must be page aligned
+ * and uncompressed. The page alignment is implemented by adding a zero filled file
+ * in front of the the native code library. This tool is designed so that running
+ * SignApk and/or zipalign on the resulting APK does not break the page alignment.
+ * This is achieved by outputing the filenames in the same canonical order used
+ * by SignApk and adding the same alignment fields added by zipalign.
+ */
+class RezipApk {
+    // Alignment to use for non-compressed files (must match zipalign).
+    private static final int ALIGNMENT = 4;
+
+    // Alignment to use for non-compressed *.so files
+    private static final int LIBRARY_ALIGNMENT = 4096;
+
+    // Files matching this pattern are not copied to the output when adding alignment.
+    // When reordering and verifying the APK they are copied to the end of the file.
+    private static Pattern sMetaFilePattern =
+            Pattern.compile("^(META-INF/((.*)[.](SF|RSA|DSA)|com/android/otacert))|("
+                    + Pattern.quote(JarFile.MANIFEST_NAME) + ")$");
+
+    // Pattern for matching a shared library in the APK
+    private static Pattern sLibraryPattern = Pattern.compile("^lib/[^/]*/lib.*[.]so$");
+    // Pattern for matching the crazy linker in the APK
+    private static Pattern sCrazyLinkerPattern =
+            Pattern.compile("^lib/[^/]*/libchromium_android_linker[.]so$");
+    // Pattern for matching a crazy loaded shared library in the APK
+    private static Pattern sCrazyLibraryPattern = Pattern.compile("^lib/[^/]*/crazy[.]lib.*[.]so$");
+
+    private static boolean isLibraryFilename(String filename) {
+        return sLibraryPattern.matcher(filename).matches()
+                && !sCrazyLinkerPattern.matcher(filename).matches();
+    }
+
+    private static boolean isCrazyLibraryFilename(String filename) {
+        return sCrazyLibraryPattern.matcher(filename).matches();
+    }
+
+    private static String renameLibraryForCrazyLinker(String filename) {
+        int lastSlash = filename.lastIndexOf('/');
+        // We rename the library, so that the Android Package Manager
+        // no longer extracts the library.
+        return filename.substring(0, lastSlash + 1) + "crazy." + filename.substring(lastSlash + 1);
+    }
+
+    /**
+     * Wraps another output stream, counting the number of bytes written.
+     */
+    private static class CountingOutputStream extends OutputStream {
+        private long mCount = 0;
+        private OutputStream mOut;
+
+        public CountingOutputStream(OutputStream out) {
+            this.mOut = out;
+        }
+
+        /** Returns the number of bytes written. */
+        public long getCount() {
+            return mCount;
+        }
+
+        @Override public void write(byte[] b, int off, int len) throws IOException {
+            mOut.write(b, off, len);
+            mCount += len;
+        }
+
+        @Override public void write(int b) throws IOException {
+            mOut.write(b);
+            mCount++;
+        }
+
+        @Override public void close() throws IOException {
+            mOut.close();
+        }
+
+        @Override public void flush() throws IOException {
+            mOut.flush();
+        }
+    }
+
+    private static String outputName(JarEntry entry, boolean rename) {
+        String inName = entry.getName();
+        if (rename && entry.getSize() > 0 && isLibraryFilename(inName)) {
+            return renameLibraryForCrazyLinker(inName);
+        }
+        return inName;
+    }
+
+    /**
+     * Comparator used to sort jar entries from the input file.
+     * Sorting is done based on the output filename (which may be renamed).
+     * Filenames are in natural string order, except that filenames matching
+     * the meta-file pattern are always after other files. This is so the manifest
+     * and signature are at the end of the file after any alignment file.
+     */
+    private static class EntryComparator implements Comparator<JarEntry> {
+        private boolean mRename;
+
+        public EntryComparator(boolean rename) {
+            mRename = rename;
+        }
+
+        @Override
+        public int compare(JarEntry j1, JarEntry j2) {
+            String o1 = outputName(j1, mRename);
+            String o2 = outputName(j2, mRename);
+            boolean o1Matches = sMetaFilePattern.matcher(o1).matches();
+            boolean o2Matches = sMetaFilePattern.matcher(o2).matches();
+            if (o1Matches != o2Matches) {
+                return o1Matches ? 1 : -1;
+            } else {
+                return o1.compareTo(o2);
+            }
+        }
+    }
+
+    // Build an ordered list of jar entries. The jar entries from the input are
+    // sorted based on the output filenames (which may be renamed). If |omitMetaFiles|
+    // is true do not include the jar entries for the META-INF files.
+    // Entries are ordered in the deterministic order used by SignApk.
+    private static List<JarEntry> getOutputFileOrderEntries(
+            JarFile jar, boolean omitMetaFiles, boolean rename) {
+        List<JarEntry> entries = new ArrayList<JarEntry>();
+        for (Enumeration<JarEntry> e = jar.entries(); e.hasMoreElements(); ) {
+            JarEntry entry = e.nextElement();
+            if (entry.isDirectory()) {
+                continue;
+            }
+            if (omitMetaFiles && sMetaFilePattern.matcher(entry.getName()).matches()) {
+                continue;
+            }
+            entries.add(entry);
+        }
+
+        // We sort the input entries by name. When present, META-INF files
+        // are sorted to the end.
+        Collections.sort(entries, new EntryComparator(rename));
+        return entries;
+    }
+
+    /**
+     * Add a zero filled alignment file at this point in the zip file.
+     * The added file will be added before |name| and after |prevName|.
+     * The size of the alignment file is such that the location of the
+     * file |name| will be on a LIBRARY_ALIGNMENT boundary.
+     *
+     * Note this arrangement is devised so that running SignApk and/or zipalign on the resulting
+     * file will not alter the alignment.
+     *
+     * @param offset number of bytes into the output file at this point.
+     * @param timestamp time in millis since the epoch to include in the header.
+     * @param name the name of the library filename.
+     * @param prevName the name of the previous file in the archive (or null).
+     * @param out jar output stream to write the alignment file to.
+     *
+     * @throws IOException if the output file can not be written.
+     */
+    private static void addAlignmentFile(
+            long offset, long timestamp, String name, String prevName,
+            JarOutputStream out) throws IOException {
+
+        // Compute the start and alignment of the library, as if it was next.
+        int headerSize = JarFile.LOCHDR + name.length();
+        long libOffset = offset + headerSize;
+        int libNeeded = LIBRARY_ALIGNMENT - (int) (libOffset % LIBRARY_ALIGNMENT);
+        if (libNeeded == LIBRARY_ALIGNMENT) {
+            // Already aligned, no need to add an alignment file.
+            return;
+        }
+
+        // Check that there is not another file between the library and the
+        // alignment file.
+        String alignName = name.substring(0, name.length() - 2) + "align";
+        if (prevName != null && prevName.compareTo(alignName) >= 0) {
+            throw new UnsupportedOperationException(
+                "Unable to insert alignment file, because there is "
+                + "another file in front of the file to be aligned. "
+                + "Other file: " + prevName + " Alignment file: " + alignName
+                + " file: " + name);
+        }
+
+        // Compute the size of the alignment file header.
+        headerSize = JarFile.LOCHDR + alignName.length();
+        // We are going to add an alignment file of type STORED. This file
+        // will itself induce a zipalign alignment adjustment.
+        int extraNeeded =
+                (ALIGNMENT - (int) ((offset + headerSize) % ALIGNMENT)) % ALIGNMENT;
+        headerSize += extraNeeded;
+
+        if (libNeeded < headerSize + 1) {
+            // The header was bigger than the alignment that we need, add another page.
+            libNeeded += LIBRARY_ALIGNMENT;
+        }
+        // Compute the size of the alignment file.
+        libNeeded -= headerSize;
+
+        // Build the header for the alignment file.
+        byte[] zeroBuffer = new byte[libNeeded];
+        JarEntry alignEntry = new JarEntry(alignName);
+        alignEntry.setMethod(JarEntry.STORED);
+        alignEntry.setSize(libNeeded);
+        alignEntry.setTime(timestamp);
+        CRC32 crc = new CRC32();
+        crc.update(zeroBuffer);
+        alignEntry.setCrc(crc.getValue());
+
+        if (extraNeeded != 0) {
+            alignEntry.setExtra(new byte[extraNeeded]);
+        }
+
+        // Output the alignment file.
+        out.putNextEntry(alignEntry);
+        out.write(zeroBuffer);
+        out.closeEntry();
+        out.flush();
+    }
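+
+    // Worked example (illustrative): with LIBRARY_ALIGNMENT = 4096, if the
+    // library's local file header would end at offset 5000, the library data
+    // must start at 8192; the alignment entry is sized to fill that gap,
+    // less its own header and any STORED-alignment extra bytes.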
+
+    // Make a JarEntry for the output file which corresponds to the input
+    // file. The output file will be called |name|. The output file will always
+    // be uncompressed (STORED). If the input is not STORED it is necessary to inflate
+    // it to compute the CRC and size of the output entry.
+    private static JarEntry makeStoredEntry(String name, JarEntry inEntry, JarFile in)
+            throws IOException {
+        JarEntry outEntry = new JarEntry(name);
+        outEntry.setMethod(JarEntry.STORED);
+
+        if (inEntry.getMethod() == JarEntry.STORED) {
+            outEntry.setCrc(inEntry.getCrc());
+            outEntry.setSize(inEntry.getSize());
+        } else {
+            // We are inflating the file. We need to compute the CRC and size.
+            byte[] buffer = new byte[4096];
+            CRC32 crc = new CRC32();
+            int size = 0;
+            int num;
+            InputStream data = in.getInputStream(inEntry);
+            while ((num = data.read(buffer)) > 0) {
+                crc.update(buffer, 0, num);
+                size += num;
+            }
+            data.close();
+            outEntry.setCrc(crc.getValue());
+            outEntry.setSize(size);
+        }
+        return outEntry;
+    }
+
+    /**
+     * Copy the contents of the input APK file to the output APK file. If |rename| is
+     * true then non-empty libraries (*.so) in the input will be renamed by prefixing
+     * "crazy.". This is done to prevent the Android Package Manager extracting the
+     * library. Note the crazy linker itself is not renamed, for bootstrapping reasons.
+     * Empty libraries are not renamed (they are in the APK to workaround a bug where
+     * the Android Package Manager fails to delete old versions when upgrading).
+     * There must be exactly one "crazy" library in the output stream. The "crazy"
+     * library will be uncompressed and page aligned in the output stream. Page
+     * alignment is implemented by adding a zero filled file, regular alignment is
+     * implemented by adding a zero filled extra field to the zip file header. If
+     * |addAlignment| is true a page alignment file is added, otherwise the "crazy"
+     * library must already be page aligned. Care is taken so that the output is generated
+     * in the same way as SignApk. This is important so that running SignApk and
+     * zipalign on the output does not break the page alignment. The archive may not
+     * contain a "*.apk" as SignApk has special nested signing logic that we do not
+     * support.
+     *
+     * @param in The input APK File.
+     * @param out The output APK stream.
+     * @param countOut Counting output stream (to measure the current offset).
+     * @param addAlignment Whether to add the alignment file or just check.
+     * @param rename Whether to rename libraries to be "crazy".
+     *
+     * @throws IOException if the output file can not be written.
+     */
+    private static void rezip(
+            JarFile in, JarOutputStream out, CountingOutputStream countOut,
+            boolean addAlignment, boolean rename) throws IOException {
+
+        List<JarEntry> entries = getOutputFileOrderEntries(in, addAlignment, rename);
+        long timestamp = System.currentTimeMillis();
+        byte[] buffer = new byte[4096];
+        boolean firstEntry = true;
+        String prevName = null;
+        int numCrazy = 0;
+        for (JarEntry inEntry : entries) {
+            // Rename files, if specified.
+            String name = outputName(inEntry, rename);
+            if (name.endsWith(".apk")) {
+                throw new UnsupportedOperationException(
+                        "Nested APKs are not supported: " + name);
+            }
+
+            // Build the header.
+            JarEntry outEntry = null;
+            boolean isCrazy = isCrazyLibraryFilename(name);
+            if (isCrazy) {
+                // "crazy" libraries are alway output uncompressed (STORED).
+                outEntry = makeStoredEntry(name, inEntry, in);
+                numCrazy++;
+                if (numCrazy > 1) {
+                    throw new UnsupportedOperationException(
+                            "Found more than one library\n"
+                            + "Multiple libraries are not supported for APKs that use "
+                            + "'load_library_from_zip'.\n"
+                            + "See crbug/388223.\n"
+                            + "Note, check that your build is clean.\n"
+                            + "An unclean build can incorrectly incorporate old "
+                            + "libraries in the APK.");
+                }
+            } else if (inEntry.getMethod() == JarEntry.STORED) {
+                // Preserve the STORED method of the input entry.
+                outEntry = new JarEntry(inEntry);
+                outEntry.setExtra(null);
+            } else {
+                // Create a new entry so that the compressed len is recomputed.
+                outEntry = new JarEntry(name);
+            }
+            outEntry.setTime(timestamp);
+
+            // Compute and add alignment
+            long offset = countOut.getCount();
+            if (firstEntry) {
+                // The first entry in a jar file has an extra field of
+                // four bytes that you can't get rid of; any extra
+                // data you specify in the JarEntry is appended to
+                // these forced four bytes.  This is JAR_MAGIC in
+                // JarOutputStream; the bytes are 0xfeca0000.
+                firstEntry = false;
+                offset += 4;
+            }
+            if (outEntry.getMethod() == JarEntry.STORED) {
+                if (isCrazy) {
+                    if (addAlignment) {
+                        addAlignmentFile(offset, timestamp, name, prevName, out);
+                    }
+                    // We check that we did indeed get to a page boundary.
+                    offset = countOut.getCount() + JarFile.LOCHDR + name.length();
+                    if ((offset % LIBRARY_ALIGNMENT) != 0) {
+                        throw new AssertionError(
+                                "Library was not page aligned when verifying page alignment. "
+                                + "Library name: " + name + " Expected alignment: "
+                                + LIBRARY_ALIGNMENT + "Offset: " + offset + " Error: "
+                                + (offset % LIBRARY_ALIGNMENT));
+                    }
+                } else {
+                    // This is equivalent to zipalign.
+                    offset += JarFile.LOCHDR + name.length();
+                    int needed = (ALIGNMENT - (int) (offset % ALIGNMENT)) % ALIGNMENT;
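+                    // E.g. with 4-byte alignment and offset % 4 == 3, needed
+                    // is 1: a one-byte extra field shifts the entry data to
+                    // the next 4-byte boundary, just as zipalign would.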
+                    if (needed != 0) {
+                        outEntry.setExtra(new byte[needed]);
+                    }
+                }
+            }
+            out.putNextEntry(outEntry);
+
+            // Copy the data from the input to the output
+            int num;
+            InputStream data = in.getInputStream(inEntry);
+            while ((num = data.read(buffer)) > 0) {
+                out.write(buffer, 0, num);
+            }
+            data.close();
+            out.closeEntry();
+            out.flush();
+            prevName = name;
+        }
+        if (numCrazy == 0) {
+            throw new AssertionError("There was no crazy library in the archive");
+        }
+    }
+
+    private static void usage() {
+        System.err.println("Usage: prealignapk (addalignment|reorder) input.apk output.apk");
+        System.err.println("\"crazy\" libraries are always inflated in the output");
+        System.err.println(
+                "  renamealign  - rename libraries with \"crazy.\" prefix and add alignment file");
+        System.err.println("  align        - add alignment file");
+        System.err.println("  reorder      - re-creates canonical ordering and checks alignment");
+        System.exit(2);
+    }
+
+    public static void main(String[] args) throws IOException {
+        if (args.length != 3) usage();
+
+        boolean addAlignment = false;
+        boolean rename = false;
+        if (args[0].equals("renamealign")) {
+            // Normal case. Before signing we rename the library and add an alignment file.
+            addAlignment = true;
+            rename = true;
+        } else if (args[0].equals("align")) {
+            // LGPL compliance case. Before signing, we add an alignment file to a
+            // reconstructed APK which already contains the "crazy" library.
+            addAlignment = true;
+            rename = false;
+        } else if (args[0].equals("reorder")) {
+            // Normal case. After jarsigning we write the file in the canonical order and check.
+            addAlignment = false;
+        } else {
+            usage();
+        }
+
+        String inputFilename = args[1];
+        String outputFilename = args[2];
+
+        JarFile inputJar = null;
+        FileOutputStream outputFile = null;
+
+        try {
+            inputJar = new JarFile(new File(inputFilename), true);
+            outputFile = new FileOutputStream(outputFilename);
+
+            CountingOutputStream outCount = new CountingOutputStream(outputFile);
+            JarOutputStream outputJar = new JarOutputStream(outCount);
+
+            // Match the compression level used by SignApk.
+            outputJar.setLevel(9);
+
+            rezip(inputJar, outputJar, outCount, addAlignment, rename);
+            outputJar.close();
+        } finally {
+            if (inputJar != null) inputJar.close();
+            if (outputFile != null) outputFile.close();
+        }
+    }
+}
diff --git a/build/android/screenshot.py b/build/android/screenshot.py
new file mode 100755
index 0000000..6ab9060
--- /dev/null
+++ b/build/android/screenshot.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+import devil_chromium
+from devil.android.tools import screenshot
+
+if __name__ == '__main__':
+  devil_chromium.Initialize()
+  sys.exit(screenshot.main())
diff --git a/build/android/setup.gyp b/build/android/setup.gyp
new file mode 100644
index 0000000..0ef0531
--- /dev/null
+++ b/build/android/setup.gyp
@@ -0,0 +1,112 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+  'conditions': [
+    ['android_must_copy_system_libraries == 1', {
+      'targets': [
+        {
+          # These libraries from the Android NDK are required to be packaged
+          # with any APK that is built with them. build/java_apk.gypi expects
+          # any libraries that should be packaged with the APK to be in
+          # <(SHARED_LIB_DIR)
+          'target_name': 'copy_system_libraries',
+          'type': 'none',
+          'copies': [
+            {
+              'destination': '<(SHARED_LIB_DIR)/',
+              'files': [
+                '<(android_libcpp_libs_dir)/libc++_shared.so',
+              ],
+            },
+          ],
+        },
+      ],
+    }],
+  ],
+  'targets': [
+    {
+      'target_name': 'get_build_device_configurations',
+      'type': 'none',
+      'actions': [
+        {
+          'action_name': 'get configurations',
+          'inputs': [
+            'gyp/util/build_device.py',
+            'gyp/get_device_configuration.py',
+          ],
+          'outputs': [
+            '<(build_device_config_path)',
+            '<(build_device_config_path).fake',
+          ],
+          'action': [
+            'python', 'gyp/get_device_configuration.py',
+            '--output=<(build_device_config_path)',
+            '--output-directory=<(PRODUCT_DIR)',
+          ],
+        }
+      ],
+    },
+    {
+      # Target for creating common output build directories. Creating output
+      # dirs beforehand ensures that build scripts can assume these folders to
+      # exist and there are no race conditions resulting from build scripts
+      # trying to create these directories.
+      # The build/java.gypi target depends on this target.
+      'target_name': 'build_output_dirs',
+      'type': 'none',
+      'actions': [
+        {
+          'action_name': 'create_java_output_dirs',
+          'variables' : {
+            'output_dirs' : [
+              '<(PRODUCT_DIR)/apks',
+              '<(PRODUCT_DIR)/lib.java',
+              '<(PRODUCT_DIR)/test.lib.java',
+            ]
+          },
+          'inputs' : [],
+          # Only a dummy empty-string output is declared (gyp requires an
+          # outputs list), so this command isn't re-run when the output
+          # directories are touched (i.e. apks are written to them).
+          'outputs': [''],
+          'action': [
+            'mkdir',
+            '-p',
+            '<@(output_dirs)',
+          ],
+        },
+      ],
+    }, # build_output_dirs
+    {
+      'target_name': 'sun_tools_java',
+      'type': 'none',
+      'variables': {
+        'found_jar_path': '<(PRODUCT_DIR)/sun_tools_java/tools.jar',
+        'jar_path': '<(found_jar_path)',
+      },
+      'includes': [
+        '../../build/host_prebuilt_jar.gypi',
+      ],
+      'actions': [
+        {
+          'action_name': 'find_sun_tools_jar',
+          'variables' : {
+          },
+          'inputs' : [
+            'gyp/find_sun_tools_jar.py',
+            'gyp/util/build_utils.py',
+          ],
+          'outputs': [
+            '<(found_jar_path)',
+          ],
+          'action': [
+            'python', 'gyp/find_sun_tools_jar.py',
+            '--output', '<(found_jar_path)',
+          ],
+        },
+      ],
+    }, # sun_tools_java
+  ]
+}
+
diff --git a/build/android/strip_native_libraries.gypi b/build/android/strip_native_libraries.gypi
new file mode 100644
index 0000000..be8a5cb
--- /dev/null
+++ b/build/android/strip_native_libraries.gypi
@@ -0,0 +1,54 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide a rule that strips
+# native libraries.
+#
+# To use this, create a gyp target with the following form:
+#  {
+#    'actions': [
+#      {
+#        'action_name': 'strip_native_libraries',
+#        'variables': {
+#          'ordered_libraries_file': 'file generated by write_ordered_libraries',
+#          'input_paths': 'files to be added to the list of inputs',
+#          'stamp': 'file to touch when the action is complete',
+#          'stripped_libraries_dir': 'directory to store stripped libraries',
+#        },
+#        'includes': [ '../../build/android/strip_native_libraries.gypi' ],
+#      },
+#    ],
+#  },
+#
+
+{
+  'message': 'Stripping libraries for <(_target_name)',
+  'variables': {
+    'input_paths': [],
+  },
+  'inputs': [
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(DEPTH)/build/android/gyp/strip_library_for_device.py',
+    '<(ordered_libraries_file)',
+    '>@(input_paths)',
+  ],
+  'outputs': [
+    '<(stamp)',
+  ],
+  'conditions': [
+    ['android_must_copy_system_libraries == 1', {
+      # Add a fake output to force the build to always re-run this step. This
+      # is required because the real inputs are not known at gyp-time and
+      # changing base.so may not trigger changes to dependent libraries.
+      'outputs': [ '<(stamp).fake' ]
+    }],
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/strip_library_for_device.py',
+    '--android-strip=<(android_strip)',
+    '--android-strip-arg=--strip-unneeded',
+    '--stripped-libraries-dir=<(stripped_libraries_dir)',
+    '--libraries-dir=<(SHARED_LIB_DIR),<(PRODUCT_DIR)',
+    '--libraries=@FileArg(<(ordered_libraries_file):libraries)',
+    '--stamp=<(stamp)',
+  ],
+}
diff --git a/build/android/test_runner.gypi b/build/android/test_runner.gypi
new file mode 100644
index 0000000..5127e2a
--- /dev/null
+++ b/build/android/test_runner.gypi
@@ -0,0 +1,107 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Generates a script in the output bin directory which runs the test
+# target using the test runner script in build/android/test_runner.py.
+#
+# To use this, include this file in a gtest or instrumentation test target.
+# {
+#   'target_name': 'gtest',
+#   'type': 'none',
+#   'variables': {
+#     'test_type': 'gtest',  # string
+#     'test_suite_name': 'gtest_suite',  # string
+#     'isolate_file': 'path/to/gtest.isolate',  # string
+#   },
+#   'includes': ['path/to/this/gypi/file'],
+# }
+#
+# {
+#   'target_name': 'instrumentation_apk',
+#   'type': 'none',
+#   'variables': {
+#     'test_type': 'instrumentation',  # string
+#     'apk_name': 'TestApk',  # string
+#     'isolate_file': 'path/to/instrumentation_test.isolate',  # string
+#   },
+#   'includes': ['path/to/this/gypi/file'],
+# }
+#
+# {
+#   'target_name': 'junit_test',
+#   'type': 'none',
+#   'variables': {
+#     'test_type': 'junit',  # string
+#   },
+#   'includes': ['path/to/this/gypi/file'],
+# }
+#
+
+{
+  'variables': {
+    'variables': {
+      'additional_apks%': [],
+      'isolate_file%': '',
+      'shard_timeout%': '',
+      'test_runner_path%': '',
+    },
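+    # The nested 'variables' dict above supplies defaults; the trailing '%'
+    # on each name means the value is used only if the including target has
+    # not already defined that variable.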
+    'test_runner_args': ['--output-directory', '<(PRODUCT_DIR)'],
+    'conditions': [
+      ['test_type == "gtest"', {
+        'test_runner_args': ['--suite', '<(test_suite_name)'],
+        'script_name': 'run_<(test_suite_name)',
+      }],
+      ['test_type == "instrumentation"', {
+        'test_runner_args': [
+          '--apk-under-test', '>(tested_apk_path)',
+          '--test-apk', '>(final_apk_path)',
+        ],
+        'script_name': 'run_<(_target_name)',
+        'conditions': [
+          ['emma_instrument != 0', {
+            'test_runner_args': [
+              '--coverage-dir', '<(PRODUCT_DIR)/coverage',
+            ],
+          }],
+        ],
+      }],
+      ['test_type == "junit"', {
+        'test_runner_args': ['--test-suite', '<(_target_name)'],
+        'script_name': 'run_<(_target_name)',
+      }],
+      ['additional_apks != []', {
+        'test_runner_args': ['--additional-apk-list', '>(additional_apks)'],
+      }],
+      ['isolate_file != ""', {
+        'test_runner_args': ['--isolate-file-path', '<(isolate_file)']
+      }],
+      ['shard_timeout != ""', {
+        'test_runner_args': ['--shard-timeout', '<(shard_timeout)']
+      }],
+      ['test_runner_path != ""', {
+        'test_runner_args': ['--test-runner-path', '<(test_runner_path)']
+      }],
+    ],
+  },
+  'actions': [
+    {
+      'action_name': 'create_test_runner_script_<(script_name)',
+      'message': 'Creating test runner script <(script_name)',
+      'variables': {
+        'script_output_path': '<(PRODUCT_DIR)/bin/<(script_name)',
+      },
+      'inputs': [
+        '<(DEPTH)/build/android/gyp/create_test_runner_script.py',
+      ],
+      'outputs': [
+        '<(script_output_path)'
+      ],
+      'action': [
+        'python', '<(DEPTH)/build/android/gyp/create_test_runner_script.py',
+        '--script-output-path=<(script_output_path)',
+        '<(test_type)', '<@(test_runner_args)',
+      ],
+    },
+  ],
+}
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
new file mode 100755
index 0000000..23c4039
--- /dev/null
+++ b/build/android/test_runner.py
@@ -0,0 +1,972 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs all types of tests from one unified interface."""
+
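+# Example invocations (suite and APK names are illustrative):
+#   build/android/test_runner.py gtest --suite base_unittests --release
+#   build/android/test_runner.py instrumentation --test-apk ContentShellTest
+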
+import argparse
+import collections
+import itertools
+import logging
+import os
+import signal
+import sys
+import threading
+import unittest
+
+import devil_chromium
+from devil import base_error
+from devil import devil_env
+from devil.android import device_blacklist
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.android import forwarder
+from devil.android import ports
+from devil.utils import reraiser_thread
+from devil.utils import run_tests_helper
+
+from pylib import constants
+from pylib.constants import host_paths
+from pylib.base import base_test_result
+from pylib.base import environment_factory
+from pylib.base import test_dispatcher
+from pylib.base import test_instance_factory
+from pylib.base import test_run_factory
+from pylib.linker import setup as linker_setup
+from pylib.junit import setup as junit_setup
+from pylib.junit import test_dispatcher as junit_dispatcher
+from pylib.monkey import setup as monkey_setup
+from pylib.monkey import test_options as monkey_test_options
+from pylib.perf import setup as perf_setup
+from pylib.perf import test_options as perf_test_options
+from pylib.perf import test_runner as perf_test_runner
+from pylib.results import json_results
+from pylib.results import report_results
+
+
+_DEVIL_STATIC_CONFIG_FILE = os.path.abspath(os.path.join(
+    host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'devil_config.json'))
+
+
+def AddCommonOptions(parser):
+  """Adds all common options to |parser|."""
+
+  group = parser.add_argument_group('Common Options')
+
+  default_build_type = os.environ.get('BUILDTYPE', 'Debug')
+
+  debug_or_release_group = group.add_mutually_exclusive_group()
+  debug_or_release_group.add_argument(
+      '--debug', action='store_const', const='Debug', dest='build_type',
+      default=default_build_type,
+      help=('If set, run test suites under out/Debug. '
+            'Default is env var BUILDTYPE or Debug.'))
+  debug_or_release_group.add_argument(
+      '--release', action='store_const', const='Release', dest='build_type',
+      help=('If set, run test suites under out/Release. '
+            'Default is env var BUILDTYPE or Debug.'))
+
+  group.add_argument('--build-directory', dest='build_directory',
+                     help=('Path to the directory in which build files are'
+                           ' located (should not include build type)'))
+  group.add_argument('--output-directory', dest='output_directory',
+                     help=('Path to the directory in which build files are'
+                           ' located (must include build type). This will take'
+                           ' precedence over --debug, --release and'
+                           ' --build-directory'))
+  group.add_argument('--num_retries', '--num-retries', dest='num_retries',
+                     type=int, default=2,
+                     help=('Number of retries for a test before '
+                           'giving up (default: %(default)s).'))
+  group.add_argument('-v',
+                     '--verbose',
+                     dest='verbose_count',
+                     default=0,
+                     action='count',
+                     help='Verbose level (multiple times for more)')
+  group.add_argument('--flakiness-dashboard-server',
+                     dest='flakiness_dashboard_server',
+                     help=('Address of the server that is hosting the '
+                           'Chrome for Android flakiness dashboard.'))
+  group.add_argument('--enable-platform-mode', action='store_true',
+                     help=('Run the test scripts in platform mode, which '
+                           'conceptually separates the test runner from the '
+                           '"device" (local or remote, real or emulated) on '
+                           'which the tests are running. [experimental]'))
+  group.add_argument('-e', '--environment', default='local',
+                     choices=constants.VALID_ENVIRONMENTS,
+                     help='Test environment to run in (default: %(default)s).')
+  group.add_argument('--adb-path',
+                     help=('Specify the absolute path of the adb binary that '
+                           'should be used.'))
+  group.add_argument('--json-results-file', '--test-launcher-summary-output',
+                     dest='json_results_file',
+                     help='If set, will dump results in JSON form '
+                          'to specified file.')
+
+  logcat_output_group = group.add_mutually_exclusive_group()
+  logcat_output_group.add_argument(
+      '--logcat-output-dir',
+      help='If set, will dump logcats recorded during test run to directory. '
+           'File names will be the device ids with timestamps.')
+  logcat_output_group.add_argument(
+      '--logcat-output-file',
+      help='If set, will merge logcats recorded during test run and dump them '
+           'to the specified file.')
+
+  class FastLocalDevAction(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+      namespace.verbose_count = max(namespace.verbose_count, 1)
+      namespace.num_retries = 0
+      namespace.enable_device_cache = True
+      namespace.enable_concurrent_adb = True
+      namespace.skip_clear_data = True
+      namespace.extract_test_list_from_filter = True
+
+  group.add_argument('--fast-local-dev', type=bool, nargs=0,
+                     action=FastLocalDevAction,
+                     help='Alias for: --verbose --num-retries=0 '
+                          '--enable-device-cache --enable-concurrent-adb '
+                          '--skip-clear-data --extract-test-list-from-filter')
+
+def ProcessCommonOptions(args):
+  """Processes and handles all common options."""
+  run_tests_helper.SetLogLevel(args.verbose_count)
+  constants.SetBuildType(args.build_type)
+  if args.build_directory:
+    constants.SetBuildDirectory(args.build_directory)
+  if args.output_directory:
+    constants.SetOutputDirectory(args.output_directory)
+
+  devil_custom_deps = None
+  if args.adb_path:
+    devil_custom_deps = {
+      'adb': {
+        devil_env.GetPlatform(): [args.adb_path]
+      }
+    }
+
+  devil_chromium.Initialize(
+      output_directory=constants.GetOutDirectory(),
+      custom_deps=devil_custom_deps)
+
+  # Some things such as Forwarder require ADB to be in the environment path.
+  adb_dir = os.path.dirname(constants.GetAdbPath())
+  if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
+    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']
+
+
+def AddRemoteDeviceOptions(parser):
+  group = parser.add_argument_group('Remote Device Options')
+
+  group.add_argument('--trigger',
+                     help=('Only triggers the test if set. Stores test_run_id '
+                           'in the given file path.'))
+  group.add_argument('--collect',
+                     help=('Only collects the test results if set. '
+                           'Gets test_run_id from the given file path.'))
+  group.add_argument('--remote-device', action='append',
+                     help='Device type to run test on.')
+  group.add_argument('--results-path',
+                     help='File path to download results to.')
+  group.add_argument('--api-protocol',
+                     help='HTTP protocol to use. (http or https)')
+  group.add_argument('--api-address',
+                     help='Address to send HTTP requests.')
+  group.add_argument('--api-port',
+                     help='Port to send HTTP requests to.')
+  group.add_argument('--runner-type',
+                     help='Type of test to run as.')
+  group.add_argument('--runner-package',
+                     help='Package name of test.')
+  group.add_argument('--device-type',
+                     choices=constants.VALID_DEVICE_TYPES,
+                     help=('Type of device to run on (iOS or Android).'))
+  group.add_argument('--device-oem', action='append',
+                     help='Device OEM to run on.')
+  group.add_argument('--remote-device-file',
+                     help=('File with JSON to select remote device. '
+                           'Overrides all other flags.'))
+  group.add_argument('--remote-device-timeout', type=int,
+                     help='Number of times to retry finding the remote device.')
+  group.add_argument('--network-config', type=int,
+                     help='Integer that specifies the network environment '
+                          'that the tests will be run in.')
+  group.add_argument('--test-timeout', type=int,
+                     help='Test run timeout in seconds.')
+
+  device_os_group = group.add_mutually_exclusive_group()
+  device_os_group.add_argument('--remote-device-minimum-os',
+                               help='Minimum OS on device.')
+  device_os_group.add_argument('--remote-device-os', action='append',
+                               help='OS to have on the device.')
+
+  api_secret_group = group.add_mutually_exclusive_group()
+  api_secret_group.add_argument('--api-secret', default='',
+                                help='API secret for remote devices.')
+  api_secret_group.add_argument('--api-secret-file', default='',
+                                help='Path to file that contains API secret.')
+
+  api_key_group = group.add_mutually_exclusive_group()
+  api_key_group.add_argument('--api-key', default='',
+                             help='API key for remote devices.')
+  api_key_group.add_argument('--api-key-file', default='',
+                             help='Path to file that contains API key.')
+
+
+def AddDeviceOptions(parser):
+  """Adds device options to |parser|."""
+  group = parser.add_argument_group(title='Device Options')
+  group.add_argument('--tool',
+                     dest='tool',
+                     help=('Run the test under a tool '
+                           '(use --tool help to list them)'))
+  group.add_argument('-d', '--device', dest='test_device',
+                     help=('Target device for the test suite '
+                           'to run on.'))
+  group.add_argument('--blacklist-file', help='Device blacklist file.')
+  group.add_argument('--enable-device-cache', action='store_true',
+                     help='Cache device state to disk between runs')
+  group.add_argument('--enable-concurrent-adb', action='store_true',
+                     help='Run multiple adb commands at the same time, even '
+                          'for the same device.')
+  group.add_argument('--skip-clear-data', action='store_true',
+                     help='Do not wipe app data between tests. Use this to '
+                          'speed up local development; never use it on bots '
+                          '(it increases flakiness).')
+
+
+def AddGTestOptions(parser):
+  """Adds gtest options to |parser|."""
+
+  group = parser.add_argument_group('GTest Options')
+  group.add_argument('-s', '--suite', dest='suite_name',
+                     nargs='+', metavar='SUITE_NAME', required=True,
+                     help='Executable name of the test suite to run.')
+  group.add_argument('--executable-dist-dir',
+                     help="Path to executable's dist directory for native"
+                          " (non-apk) tests.")
+  group.add_argument('--test-apk-incremental-install-script',
+                     help='Path to install script for the test apk.')
+  group.add_argument('--gtest_also_run_disabled_tests',
+                     '--gtest-also-run-disabled-tests',
+                     dest='run_disabled', action='store_true',
+                     help='Also run disabled tests if applicable.')
+  group.add_argument('-a', '--test-arguments', dest='test_arguments',
+                     default='',
+                     help='Additional arguments to pass to the test.')
+  group.add_argument('-t', '--shard-timeout',
+                     dest='shard_timeout', type=int, default=120,
+                     help='Timeout to wait for each test '
+                          '(default: %(default)s).')
+  group.add_argument('--isolate_file_path',
+                     '--isolate-file-path',
+                     dest='isolate_file_path',
+                     help='.isolate file path to override the default '
+                          'path')
+  group.add_argument('--app-data-file', action='append', dest='app_data_files',
+                     help='A file path relative to the app data directory '
+                          'that should be saved to the host.')
+  group.add_argument('--app-data-file-dir',
+                     help='Host directory to which app data files will be'
+                          ' saved. Used with --app-data-file.')
+  group.add_argument('--delete-stale-data', dest='delete_stale_data',
+                     action='store_true',
+                     help='Delete stale test data on the device.')
+  group.add_argument('--repeat', '--gtest_repeat', '--gtest-repeat',
+                     dest='repeat', type=int, default=0,
+                     help='Number of times to repeat the specified set of '
+                          'tests.')
+  group.add_argument('--break-on-failure', '--break_on_failure',
+                     dest='break_on_failure', action='store_true',
+                     help='Whether to break on failure.')
+  group.add_argument('--extract-test-list-from-filter',
+                     action='store_true',
+                     help='When a test filter is specified, and the list of '
+                          'tests can be determined from it, skip querying the '
+                          'device for the list of all tests. Speeds up local '
+                          'development, but is not safe to use on bots '
+                          '(http://crbug.com/549214).')
+
+  filter_group = group.add_mutually_exclusive_group()
+  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
+                            dest='test_filter',
+                            help='googletest-style filter string.')
+  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
+                            help='Path to file that contains googletest-style '
+                                  'filter strings. (Lines will be joined with '
+                                  '":" to create a single filter string.)')
+
+  AddDeviceOptions(parser)
+  AddCommonOptions(parser)
+  AddRemoteDeviceOptions(parser)
+
+
+def AddLinkerTestOptions(parser):
+  group = parser.add_argument_group('Linker Test Options')
+  group.add_argument('-f', '--gtest-filter', dest='test_filter',
+                     help='googletest-style filter string.')
+  AddCommonOptions(parser)
+  AddDeviceOptions(parser)
+
+
+def AddJavaTestOptions(argument_group):
+  """Adds the Java test options to |option_parser|."""
+
+  argument_group.add_argument(
+      '-f', '--test-filter', '--gtest_filter', '--gtest-filter',
+      dest='test_filter',
+      help=('Test filter (if not fully qualified, will run all matches).'))
+  argument_group.add_argument(
+      '--repeat', dest='repeat', type=int, default=0,
+      help='Number of times to repeat the specified set of tests.')
+  argument_group.add_argument(
+      '--break-on-failure', '--break_on_failure',
+      dest='break_on_failure', action='store_true',
+      help='Whether to break on failure.')
+  argument_group.add_argument(
+      '-A', '--annotation', dest='annotation_str',
+      help=('Comma-separated list of annotations. Run only tests with any of '
+            'the given annotations. An annotation can be either a key or a '
+            'key-values pair. A test that has no annotation is considered '
+            '"SmallTest".'))
+  argument_group.add_argument(
+      '-E', '--exclude-annotation', dest='exclude_annotation_str',
+      help=('Comma-separated list of annotations. Exclude tests with these '
+            'annotations.'))
+  argument_group.add_argument(
+      '--screenshot', dest='screenshot_failures', action='store_true',
+      help='Capture screenshots of test failures')
+  argument_group.add_argument(
+      '--save-perf-json', action='store_true',
+      help='Saves the JSON file for each UI Perf test.')
+  argument_group.add_argument(
+      '--official-build', action='store_true', help='Run official build tests.')
+  argument_group.add_argument(
+      '--test_data', '--test-data', action='append', default=[],
+      help=('Each instance defines a directory of test data that should be '
+            'copied to the target(s) before running the tests. The argument '
+            'should be of the form <target>:<source>, <target> is relative to '
+            'the device data directory, and <source> is relative to the '
+            'chromium build directory.'))
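+  # e.g. --test-data=foo:foo/data (illustrative) copies <build dir>/foo/data
+  # to the "foo" directory under the device's data directory.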
+  argument_group.add_argument(
+      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
+      default=True, help='Removes the dalvik.vm.enableassertions property')
+
+
+def ProcessJavaTestOptions(args):
+  """Processes options/arguments and populates |options| with defaults."""
+
+  # TODO(jbudorick): Handle most of this function in argparse.
+  if args.annotation_str:
+    args.annotations = args.annotation_str.split(',')
+  elif args.test_filter:
+    args.annotations = []
+  else:
+    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
+                        'EnormousTest', 'IntegrationTest']
+
+  if args.exclude_annotation_str:
+    args.exclude_annotations = args.exclude_annotation_str.split(',')
+  else:
+    args.exclude_annotations = []
+
+
+def AddInstrumentationTestOptions(parser):
+  """Adds Instrumentation test options to |parser|."""
+
+  parser.usage = '%(prog)s [options]'
+
+  group = parser.add_argument_group('Instrumentation Test Options')
+  AddJavaTestOptions(group)
+
+  java_or_python_group = group.add_mutually_exclusive_group()
+  java_or_python_group.add_argument(
+      '-j', '--java-only', action='store_false',
+      dest='run_python_tests', default=True, help='Run only the Java tests.')
+  java_or_python_group.add_argument(
+      '-p', '--python-only', action='store_false',
+      dest='run_java_tests', default=True,
+      help='DEPRECATED')
+
+  group.add_argument('--host-driven-root',
+                     help='DEPRECATED')
+  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
+                     action='store_true',
+                     help='Wait for debugger.')
+  group.add_argument('--apk-under-test',
+                     help='Path or name of the apk under test.')
+  group.add_argument('--apk-under-test-incremental-install-script',
+                     help='Path to install script for the --apk-under-test.')
+  group.add_argument('--test-apk', required=True,
+                     help='Path or name of the apk containing the tests '
+                          '(name is without the .apk extension; '
+                          'e.g. "ContentShellTest").')
+  group.add_argument('--test-apk-incremental-install-script',
+                     help='Path to install script for the --test-apk.')
+  group.add_argument('--additional-apk', action='append',
+                     dest='additional_apks', default=[],
+                     help='Additional apk that must be installed on '
+                          'the device when the tests are run')
+  group.add_argument('--coverage-dir',
+                     help=('Directory in which to place all generated '
+                           'EMMA coverage files.'))
+  group.add_argument('--device-flags', dest='device_flags', default='',
+                     help='The relative filepath to a file containing '
+                          'command-line flags to set on the device')
+  group.add_argument('--device-flags-file', default='',
+                     help='The relative filepath to a file containing '
+                          'command-line flags to set on the device')
+  group.add_argument('--isolate_file_path',
+                     '--isolate-file-path',
+                     dest='isolate_file_path',
+                     help='.isolate file path to override the default '
+                          'path')
+  group.add_argument('--delete-stale-data', dest='delete_stale_data',
+                     action='store_true',
+                     help='Delete stale test data on the device.')
+  group.add_argument('--timeout-scale', type=float,
+                     help='Factor by which timeouts should be scaled.')
+  group.add_argument('--strict-mode', dest='strict_mode', default='testing',
+                     help='StrictMode command-line flag set on the device, '
+                          'death/testing to kill the process, off to stop '
+                          'checking, flash to flash only. Default testing.')
+
+  AddCommonOptions(parser)
+  AddDeviceOptions(parser)
+  AddRemoteDeviceOptions(parser)
+
+
+def AddJUnitTestOptions(parser):
+  """Adds junit test options to |parser|."""
+
+  group = parser.add_argument_group('JUnit Test Options')
+  group.add_argument(
+      '-s', '--test-suite', dest='test_suite', required=True,
+      help=('JUnit test suite to run.'))
+  group.add_argument(
+      '-f', '--test-filter', dest='test_filter',
+      help='Filters tests googletest-style.')
+  group.add_argument(
+      '--package-filter', dest='package_filter',
+      help='Filters tests by package.')
+  group.add_argument(
+      '--runner-filter', dest='runner_filter',
+      help='Filters tests by runner class. Must be fully qualified.')
+  group.add_argument(
+      '--sdk-version', dest='sdk_version', type=int,
+      help='The Android SDK version.')
+  AddCommonOptions(parser)
+
+
+def AddMonkeyTestOptions(parser):
+  """Adds monkey test options to |parser|."""
+
+  group = parser.add_argument_group('Monkey Test Options')
+  group.add_argument(
+      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
+      metavar='PACKAGE', help='Package under test.')
+  group.add_argument(
+      '--event-count', default=10000, type=int,
+      help='Number of events to generate (default: %(default)s).')
+  group.add_argument(
+      '--category', default='',
+      help='A list of allowed categories.')
+  group.add_argument(
+      '--throttle', default=100, type=int,
+      help='Delay between events (ms) (default: %(default)s). ')
+  group.add_argument(
+      '--seed', type=int,
+      help=('Seed value for pseudo-random generator. Same seed value generates '
+            'the same sequence of events. Seed is randomized by default.'))
+  group.add_argument(
+      '--extra-args', default='',
+      help=('String of other args to pass to the command verbatim.'))
+
+  AddCommonOptions(parser)
+  AddDeviceOptions(parser)
+
+def ProcessMonkeyTestOptions(args):
+  """Processes all monkey test options.
+
+  Args:
+    args: argparse.Namespace object.
+
+  Returns:
+    A MonkeyOptions named tuple which contains all options relevant to
+    monkey tests.
+  """
+  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
+  category = args.category
+  if category:
+    category = args.category.split(',')
+
+  # TODO(jbudorick): Get rid of MonkeyOptions.
+  return monkey_test_options.MonkeyOptions(
+      args.verbose_count,
+      args.package,
+      args.event_count,
+      category,
+      args.throttle,
+      args.seed,
+      args.extra_args)
+
+def AddUirobotTestOptions(parser):
+  """Adds uirobot test options to |option_parser|."""
+  group = parser.add_argument_group('Uirobot Test Options')
+
+  group.add_argument('--app-under-test', required=True,
+                     help='APK to run tests on.')
+  group.add_argument(
+      '--repeat', dest='repeat', type=int, default=0,
+      help='Number of times to repeat the uirobot test.')
+  group.add_argument(
+      '--minutes', default=5, type=int,
+      help='Number of minutes to run uirobot test [default: %(default)s].')
+
+  AddCommonOptions(parser)
+  AddDeviceOptions(parser)
+  AddRemoteDeviceOptions(parser)
+
+def AddPerfTestOptions(parser):
+  """Adds perf test options to |parser|."""
+
+  group = parser.add_argument_group('Perf Test Options')
+
+  class SingleStepAction(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+      if values and not namespace.single_step:
+        parser.error('single step command provided, '
+                     'but --single-step not specified.')
+      elif namespace.single_step and not values:
+        parser.error('--single-step specified, '
+                     'but no single step command provided.')
+      setattr(namespace, self.dest, values)
+
+  step_group = group.add_mutually_exclusive_group(required=True)
+  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
+  # This requires removing "--" from client calls.
+  step_group.add_argument(
+      '--single-step', action='store_true',
+      help='Execute the given command with retries, but only print the result '
+           'for the "most successful" round.')
+  step_group.add_argument(
+      '--steps',
+      help='JSON file containing the list of commands to run.')
+  step_group.add_argument(
+      '--print-step',
+      help='The name of a previously executed perf step to print.')
+
+  group.add_argument(
+      '--output-json-list',
+      help='Write a simple list of names from --steps into the given file.')
+  group.add_argument(
+      '--collect-chartjson-data',
+      action='store_true',
+      help='Cache the chartjson output from each step for later use.')
+  group.add_argument(
+      '--output-chartjson-data',
+      default='',
+      help='Write out chartjson into the given file.')
+  group.add_argument(
+      '--get-output-dir-archive', metavar='FILENAME',
+      help='Write the cached output directory archived by a step into the'
+      ' given ZIP file.')
+  group.add_argument(
+      '--flaky-steps',
+      help=('A JSON file containing steps that are flaky '
+            'and whose exit codes will be ignored.'))
+  group.add_argument(
+      '--no-timeout', action='store_true',
+      help=('Do not impose a timeout. Each perf step is responsible for '
+            'implementing the timeout logic.'))
+  group.add_argument(
+      '-f', '--test-filter',
+      help=('Test filter (will match against the names listed in --steps).'))
+  group.add_argument(
+      '--dry-run', action='store_true',
+      help='Just print the steps without executing.')
+  # Uses 0.1 degrees C because that's what Android does.
+  group.add_argument(
+      '--max-battery-temp', type=int,
+      help='Only start tests when the battery is at or below the given '
+           'temperature (in tenths of a degree C).')
+  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
+                     help='If --single-step is specified, the command to run.')
+  group.add_argument('--min-battery-level', type=int,
+                     help='Only start tests when the battery is charged above '
+                          'the given level.')
+  group.add_argument('--known-devices-file', help='Path to known device list.')
+  AddCommonOptions(parser)
+  AddDeviceOptions(parser)
+
+
+def ProcessPerfTestOptions(args):
+  """Processes all perf test options.
+
+  Args:
+    args: argparse.Namespace object.
+
+  Returns:
+    A PerfOptions named tuple which contains all options relevant to
+    perf tests.
+  """
+  # TODO(jbudorick): Move single_step handling down into the perf tests.
+  if args.single_step:
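+    # Collapse the remainder args into one command string, e.g.
+    # ['adb', 'shell', 'ls'] -> 'adb shell ls' (illustrative command).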
+    args.single_step = ' '.join(args.single_step_command)
+  # TODO(jbudorick): Get rid of PerfOptions.
+  return perf_test_options.PerfOptions(
+      args.steps, args.flaky_steps, args.output_json_list,
+      args.print_step, args.no_timeout, args.test_filter,
+      args.dry_run, args.single_step, args.collect_chartjson_data,
+      args.output_chartjson_data, args.get_output_dir_archive,
+      args.max_battery_temp, args.min_battery_level,
+      args.known_devices_file)
+
+
+def AddPythonTestOptions(parser):
+  group = parser.add_argument_group('Python Test Options')
+  group.add_argument(
+      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
+      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
+      help='Name of the test suite to run.')
+  AddCommonOptions(parser)
+
+
+def _RunLinkerTests(args, devices):
+  """Subcommand of RunTestsCommands which runs linker tests."""
+  runner_factory, tests = linker_setup.Setup(args, devices)
+
+  results, exit_code = test_dispatcher.RunTests(
+      tests, runner_factory, devices, shard=True, test_timeout=60,
+      num_retries=args.num_retries)
+
+  report_results.LogFull(
+      results=results,
+      test_type='Linker test',
+      test_package='ChromiumLinkerTest')
+
+  if args.json_results_file:
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)
+
+  return exit_code
+
+
+def _RunJUnitTests(args):
+  """Subcommand of RunTestsCommand which runs junit tests."""
+  runner_factory, tests = junit_setup.Setup(args)
+  results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)
+
+  report_results.LogFull(
+      results=results,
+      test_type='JUnit',
+      test_package=args.test_suite)
+
+  if args.json_results_file:
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)
+
+  return exit_code
+
+
+def _RunMonkeyTests(args, devices):
+  """Subcommand of RunTestsCommands which runs monkey tests."""
+  monkey_options = ProcessMonkeyTestOptions(args)
+
+  runner_factory, tests = monkey_setup.Setup(monkey_options)
+
+  results, exit_code = test_dispatcher.RunTests(
+      tests, runner_factory, devices, shard=False, test_timeout=None,
+      num_retries=args.num_retries)
+
+  report_results.LogFull(
+      results=results,
+      test_type='Monkey',
+      test_package='Monkey')
+
+  if args.json_results_file:
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)
+
+  return exit_code
+
+
+def _RunPerfTests(args, active_devices):
+  """Subcommand of RunTestsCommands which runs perf tests."""
+  perf_options = ProcessPerfTestOptions(args)
+
+  # Just save a simple json with a list of test names.
+  if perf_options.output_json_list:
+    return perf_test_runner.OutputJsonList(
+        perf_options.steps, perf_options.output_json_list)
+
+  # Just print the results from a single previously executed step.
+  if perf_options.print_step:
+    return perf_test_runner.PrintTestOutput(
+        perf_options.print_step, perf_options.output_chartjson_data,
+        perf_options.get_output_dir_archive)
+
+  runner_factory, tests, devices = perf_setup.Setup(
+      perf_options, active_devices)
+
+  # shard=False means that each device will get the full list of tests
+  # and then each one will decide its own affinity.
+  # shard=True means each device will pop the next test available from a queue,
+  # which increases throughput but has no affinity.
+  results, _ = test_dispatcher.RunTests(
+      tests, runner_factory, devices, shard=False, test_timeout=None,
+      num_retries=args.num_retries)
+
+  report_results.LogFull(
+      results=results,
+      test_type='Perf',
+      test_package='Perf')
+
+  if args.json_results_file:
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)
+
+  if perf_options.single_step:
+    return perf_test_runner.PrintTestOutput('single_step')
+
+  perf_test_runner.PrintSummary(tests)
+
+  # Always return 0 on the sharding stage. Individual tests exit_code
+  # will be returned on the print_step stage.
+  return 0
+
+
+def _RunPythonTests(args):
+  """Subcommand of RunTestsCommand which runs python unit tests."""
+  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
+  suite_path = suite_vars['path']
+  suite_test_modules = suite_vars['test_modules']
+
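+  # Temporarily prepend the suite's directory so its test modules can be
+  # imported by name; the finally block below restores sys.path.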
+  sys.path = [suite_path] + sys.path
+  try:
+    suite = unittest.TestSuite()
+    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
+                   for m in suite_test_modules)
+    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
+    return 0 if runner.run(suite).wasSuccessful() else 1
+  finally:
+    sys.path = sys.path[1:]
+
+
+def _GetAttachedDevices(blacklist_file, test_device, enable_cache, num_retries):
+  """Get all attached devices.
+
+  Args:
+    blacklist_file: Path to device blacklist.
+    test_device: Name of a specific device to use.
+    enable_cache: Whether to enable checksum caching.
+    num_retries: Number of retries for device commands.
+
+  Returns:
+    A list of attached devices.
+  """
+  blacklist = (device_blacklist.Blacklist(blacklist_file)
+               if blacklist_file
+               else None)
+
+  attached_devices = device_utils.DeviceUtils.HealthyDevices(
+      blacklist, enable_device_files_cache=enable_cache,
+      default_retries=num_retries)
+  if test_device:
+    test_device = [d for d in attached_devices if d == test_device]
+    if not test_device:
+      raise device_errors.DeviceUnreachableError(
+          'Did not find device %s among attached devices. Attached devices: %s'
+          % (test_device, ', '.join(attached_devices)))
+    return test_device
+
+  else:
+    if not attached_devices:
+      raise device_errors.NoDevicesError()
+    return sorted(attached_devices)
+
+
+def RunTestsCommand(args): # pylint: disable=too-many-return-statements
+  """Checks test type and dispatches to the appropriate function.
+
+  Args:
+    args: argparse.Namespace object.
+
+  Returns:
+    Integer indicating the exit code.
+
+  Raises:
+    Exception: Unknown command name passed in, or an exception from an
+        individual test runner.
+  """
+  command = args.command
+
+  ProcessCommonOptions(args)
+  logging.info('command: %s', ' '.join(sys.argv))
+
+  if args.enable_platform_mode or command in ('gtest', 'instrumentation'):
+    return RunTestsInPlatformMode(args)
+
+  forwarder.Forwarder.RemoveHostLog()
+  if not ports.ResetTestServerPortAllocation():
+    raise Exception('Failed to reset test server port.')
+
+  def get_devices():
+    return _GetAttachedDevices(args.blacklist_file, args.test_device,
+                               args.enable_device_cache, args.num_retries)
+
+  if command == 'linker':
+    return _RunLinkerTests(args, get_devices())
+  elif command == 'junit':
+    return _RunJUnitTests(args)
+  elif command == 'monkey':
+    return _RunMonkeyTests(args, get_devices())
+  elif command == 'perf':
+    return _RunPerfTests(args, get_devices())
+  elif command == 'python':
+    return _RunPythonTests(args)
+  else:
+    raise Exception('Unknown test type.')
+
+
+_SUPPORTED_IN_PLATFORM_MODE = [
+  # TODO(jbudorick): Add support for more test types.
+  'gtest',
+  'instrumentation',
+  'uirobot',
+]
+
+
+def RunTestsInPlatformMode(args):
+
+  def infra_error(message):
+    logging.fatal(message)
+    sys.exit(constants.INFRA_EXIT_CODE)
+
+  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
+    infra_error('%s is not yet supported in platform mode' % args.command)
+
+  with environment_factory.CreateEnvironment(args, infra_error) as env:
+    with test_instance_factory.CreateTestInstance(args, infra_error) as test:
+      with test_run_factory.CreateTestRun(
+          args, env, test, infra_error) as test_run:
+        results = []
+        repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
+                       else itertools.count())
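+        # --repeat defaults to 0 (a single run); a negative value makes the
+        # suite repeat indefinitely via itertools.count().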
+        result_counts = collections.defaultdict(
+            lambda: collections.defaultdict(int))
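+        # Maps test name -> result type -> count, e.g. (illustratively)
+        # result_counts['FooTest.testBar']['PASS'] == 3 after three passes.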
+        iteration_count = 0
+        for _ in repetitions:
+          iteration_results = test_run.RunTests()
+          if iteration_results is not None:
+            iteration_count += 1
+            results.append(iteration_results)
+            for r in iteration_results.GetAll():
+              result_counts[r.GetName()][r.GetType()] += 1
+            report_results.LogFull(
+                results=iteration_results,
+                test_type=test.TestType(),
+                test_package=test_run.TestPackage(),
+                annotation=getattr(args, 'annotations', None),
+                flakiness_server=getattr(args, 'flakiness_dashboard_server',
+                                         None))
+            if args.break_on_failure and not iteration_results.DidRunPass():
+              break
+
+        if iteration_count > 1:
+          # Display summary results. Per-test counts are only shown for tests
+          # that did not pass in every run.
+          all_pass = 0
+          tot_tests = 0
+          for test_name in result_counts:
+            tot_tests += 1
+            if any(result_counts[test_name][x] for x in (
+                base_test_result.ResultType.FAIL,
+                base_test_result.ResultType.CRASH,
+                base_test_result.ResultType.TIMEOUT,
+                base_test_result.ResultType.UNKNOWN)):
+              logging.critical(
+                  '%s: %s',
+                  test_name,
+                  ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
+                            for i in base_test_result.ResultType.GetTypes()))
+            else:
+              all_pass += 1
+
+          logging.critical('%s of %s tests passed in all %s runs',
+                           str(all_pass),
+                           str(tot_tests),
+                           str(iteration_count))
+
+        if args.json_results_file:
+          json_results.GenerateJsonResultsFile(
+              results, args.json_results_file)
+
+  return (0 if all(r.DidRunPass() for r in results)
+          else constants.ERROR_EXIT_CODE)
+
+
+CommandConfigTuple = collections.namedtuple(
+    'CommandConfigTuple',
+    ['add_options_func', 'help_txt'])
+VALID_COMMANDS = {
+    'gtest': CommandConfigTuple(
+        AddGTestOptions,
+        'googletest-based C++ tests'),
+    'instrumentation': CommandConfigTuple(
+        AddInstrumentationTestOptions,
+        'InstrumentationTestCase-based Java tests'),
+    'junit': CommandConfigTuple(
+        AddJUnitTestOptions,
+        'JUnit4-based Java tests'),
+    'monkey': CommandConfigTuple(
+        AddMonkeyTestOptions,
+        "Tests based on Android's monkey"),
+    'perf': CommandConfigTuple(
+        AddPerfTestOptions,
+        'Performance tests'),
+    'python': CommandConfigTuple(
+        AddPythonTestOptions,
+        'Python tests based on unittest.TestCase'),
+    'linker': CommandConfigTuple(
+        AddLinkerTestOptions,
+        'Linker tests'),
+    'uirobot': CommandConfigTuple(
+        AddUirobotTestOptions,
+        'Uirobot tests'),
+}
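+
+# Example invocations (flag names illustrative; each command's Add*Options
+# function defines the real ones):
+#   test_runner.py gtest -s content_unittests
+#   test_runner.py instrumentation --test-apk ChromePublicTest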
+
+
+def DumpThreadStacks(_signal, _frame):
+  for thread in threading.enumerate():
+    reraiser_thread.LogThreadStack(thread)
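+# e.g. `kill -USR1 <pid>` against a wedged run logs the stack of every live
+# thread; main() below installs this handler for SIGUSR1.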
+
+
+def main():
+  signal.signal(signal.SIGUSR1, DumpThreadStacks)
+
+  parser = argparse.ArgumentParser()
+  command_parsers = parser.add_subparsers(title='test types',
+                                          dest='command')
+
+  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
+                                  key=lambda x: x[0]):
+    subparser = command_parsers.add_parser(
+        test_type, usage='%(prog)s [options]', help=config.help_txt)
+    config.add_options_func(subparser)
+
+  args = parser.parse_args()
+
+  try:
+    return RunTestsCommand(args)
+  except base_error.BaseError as e:
+    logging.exception('Error occurred.')
+    if e.is_infra_error:
+      return constants.INFRA_EXIT_CODE
+    return constants.ERROR_EXIT_CODE
+  except: # pylint: disable=W0702
+    logging.exception('Unrecognized error occurred.')
+    return constants.ERROR_EXIT_CODE
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/test_runner.pydeps b/build/android/test_runner.pydeps
new file mode 100644
index 0000000..f21b815
--- /dev/null
+++ b/build/android/test_runner.pydeps
@@ -0,0 +1,137 @@
+# Generated by running:
+#   build/print_python_deps.py --root build/android --output build/android/test_runner.pydeps build/android/test_runner.py
+../../third_party/appurify-python/src/appurify/__init__.py
+../../third_party/appurify-python/src/appurify/api.py
+../../third_party/appurify-python/src/appurify/constants.py
+../../third_party/appurify-python/src/appurify/utils.py
+../../third_party/catapult/catapult_base/catapult_base/__init__.py
+../../third_party/catapult/catapult_base/catapult_base/cloud_storage.py
+../../third_party/catapult/catapult_base/catapult_base/util.py
+../../third_party/catapult/dependency_manager/dependency_manager/__init__.py
+../../third_party/catapult/dependency_manager/dependency_manager/archive_info.py
+../../third_party/catapult/dependency_manager/dependency_manager/base_config.py
+../../third_party/catapult/dependency_manager/dependency_manager/cloud_storage_info.py
+../../third_party/catapult/dependency_manager/dependency_manager/dependency_info.py
+../../third_party/catapult/dependency_manager/dependency_manager/dependency_manager_util.py
+../../third_party/catapult/dependency_manager/dependency_manager/exceptions.py
+../../third_party/catapult/dependency_manager/dependency_manager/local_path_info.py
+../../third_party/catapult/dependency_manager/dependency_manager/manager.py
+../../third_party/catapult/dependency_manager/dependency_manager/uploader.py
+../../third_party/catapult/devil/devil/__init__.py
+../../third_party/catapult/devil/devil/android/__init__.py
+../../third_party/catapult/devil/devil/android/apk_helper.py
+../../third_party/catapult/devil/devil/android/battery_utils.py
+../../third_party/catapult/devil/devil/android/constants/__init__.py
+../../third_party/catapult/devil/devil/android/constants/file_system.py
+../../third_party/catapult/devil/devil/android/decorators.py
+../../third_party/catapult/devil/devil/android/device_blacklist.py
+../../third_party/catapult/devil/devil/android/device_errors.py
+../../third_party/catapult/devil/devil/android/device_list.py
+../../third_party/catapult/devil/devil/android/device_signal.py
+../../third_party/catapult/devil/devil/android/device_temp_file.py
+../../third_party/catapult/devil/devil/android/device_utils.py
+../../third_party/catapult/devil/devil/android/flag_changer.py
+../../third_party/catapult/devil/devil/android/forwarder.py
+../../third_party/catapult/devil/devil/android/install_commands.py
+../../third_party/catapult/devil/devil/android/logcat_monitor.py
+../../third_party/catapult/devil/devil/android/md5sum.py
+../../third_party/catapult/devil/devil/android/ports.py
+../../third_party/catapult/devil/devil/android/sdk/__init__.py
+../../third_party/catapult/devil/devil/android/sdk/aapt.py
+../../third_party/catapult/devil/devil/android/sdk/adb_wrapper.py
+../../third_party/catapult/devil/devil/android/sdk/build_tools.py
+../../third_party/catapult/devil/devil/android/sdk/gce_adb_wrapper.py
+../../third_party/catapult/devil/devil/android/sdk/intent.py
+../../third_party/catapult/devil/devil/android/sdk/keyevent.py
+../../third_party/catapult/devil/devil/android/sdk/split_select.py
+../../third_party/catapult/devil/devil/android/sdk/version_codes.py
+../../third_party/catapult/devil/devil/android/valgrind_tools/__init__.py
+../../third_party/catapult/devil/devil/android/valgrind_tools/base_tool.py
+../../third_party/catapult/devil/devil/base_error.py
+../../third_party/catapult/devil/devil/constants/__init__.py
+../../third_party/catapult/devil/devil/constants/exit_codes.py
+../../third_party/catapult/devil/devil/devil_env.py
+../../third_party/catapult/devil/devil/utils/__init__.py
+../../third_party/catapult/devil/devil/utils/cmd_helper.py
+../../third_party/catapult/devil/devil/utils/file_utils.py
+../../third_party/catapult/devil/devil/utils/host_utils.py
+../../third_party/catapult/devil/devil/utils/lazy/__init__.py
+../../third_party/catapult/devil/devil/utils/lazy/weak_constant.py
+../../third_party/catapult/devil/devil/utils/parallelizer.py
+../../third_party/catapult/devil/devil/utils/reraiser_thread.py
+../../third_party/catapult/devil/devil/utils/run_tests_helper.py
+../../third_party/catapult/devil/devil/utils/timeout_retry.py
+../../third_party/catapult/devil/devil/utils/watchdog_timer.py
+../../third_party/catapult/devil/devil/utils/zip_utils.py
+../util/lib/common/unittest_util.py
+devil_chromium.py
+pylib/__init__.py
+pylib/base/__init__.py
+pylib/base/base_test_result.py
+pylib/base/base_test_runner.py
+pylib/base/environment.py
+pylib/base/environment_factory.py
+pylib/base/test_collection.py
+pylib/base/test_dispatcher.py
+pylib/base/test_instance.py
+pylib/base/test_instance_factory.py
+pylib/base/test_run.py
+pylib/base/test_run_factory.py
+pylib/base/test_server.py
+pylib/chrome_test_server_spawner.py
+pylib/constants/__init__.py
+pylib/constants/host_paths.py
+pylib/gtest/__init__.py
+pylib/gtest/gtest_test_instance.py
+pylib/instrumentation/__init__.py
+pylib/instrumentation/instrumentation_parser.py
+pylib/instrumentation/instrumentation_test_instance.py
+pylib/instrumentation/test_result.py
+pylib/junit/__init__.py
+pylib/junit/setup.py
+pylib/junit/test_dispatcher.py
+pylib/junit/test_runner.py
+pylib/linker/__init__.py
+pylib/linker/setup.py
+pylib/linker/test_case.py
+pylib/linker/test_runner.py
+pylib/local/__init__.py
+pylib/local/device/__init__.py
+pylib/local/device/local_device_environment.py
+pylib/local/device/local_device_gtest_run.py
+pylib/local/device/local_device_instrumentation_test_run.py
+pylib/local/device/local_device_test_run.py
+pylib/local/local_test_server_spawner.py
+pylib/monkey/__init__.py
+pylib/monkey/setup.py
+pylib/monkey/test_options.py
+pylib/monkey/test_runner.py
+pylib/perf/__init__.py
+pylib/perf/setup.py
+pylib/perf/test_options.py
+pylib/perf/test_runner.py
+pylib/remote/__init__.py
+pylib/remote/device/__init__.py
+pylib/remote/device/appurify_constants.py
+pylib/remote/device/appurify_sanitized.py
+pylib/remote/device/remote_device_environment.py
+pylib/remote/device/remote_device_gtest_run.py
+pylib/remote/device/remote_device_helper.py
+pylib/remote/device/remote_device_instrumentation_test_run.py
+pylib/remote/device/remote_device_test_run.py
+pylib/remote/device/remote_device_uirobot_test_run.py
+pylib/results/__init__.py
+pylib/results/flakiness_dashboard/__init__.py
+pylib/results/flakiness_dashboard/json_results_generator.py
+pylib/results/flakiness_dashboard/results_uploader.py
+pylib/results/json_results.py
+pylib/results/report_results.py
+pylib/uirobot/__init__.py
+pylib/uirobot/uirobot_test_instance.py
+pylib/utils/__init__.py
+pylib/utils/isolator.py
+pylib/utils/proguard.py
+pylib/utils/repo_utils.py
+pylib/utils/test_environment.py
+pylib/valgrind_tools.py
+test_runner.py
diff --git a/build/android/tests/symbolize/Makefile b/build/android/tests/symbolize/Makefile
new file mode 100644
index 0000000..5178a04
--- /dev/null
+++ b/build/android/tests/symbolize/Makefile
@@ -0,0 +1,11 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+TOOLCHAIN=../../../../third_party/android_tools/ndk/toolchains/arm-linux-androideabi-4.6/prebuilt/linux-x86_64/bin/arm-linux-androideabi-
+CXX=$(TOOLCHAIN)g++
+
+lib%.so: %.cc
+	$(CXX) -nostdlib -g -fPIC -shared $< -o $@
+
+all: liba.so libb.so
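+
+# e.g. `make all` cross-compiles liba.so and libb.so with the NDK g++ above;
+# -g keeps the debug symbols these test fixtures exist to provide.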
diff --git a/build/android/tests/symbolize/a.cc b/build/android/tests/symbolize/a.cc
new file mode 100644
index 0000000..f0c7ca4
--- /dev/null
+++ b/build/android/tests/symbolize/a.cc
@@ -0,0 +1,14 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class A {
+ public:
+  A();
+  void Foo(int i);
+  void Bar(const char* c);
+};
+
+A::A() {}
+void A::Foo(int i) {}
+void A::Bar(const char* c) {}
diff --git a/build/android/tests/symbolize/b.cc b/build/android/tests/symbolize/b.cc
new file mode 100644
index 0000000..db87520
--- /dev/null
+++ b/build/android/tests/symbolize/b.cc
@@ -0,0 +1,14 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class B {
+ public:
+  B();
+  void Baz(float f);
+  void Qux(double d);
+};
+
+B::B() {}
+void B::Baz(float f) {}
+void B::Qux(double d) {}
diff --git a/build/android/tombstones.py b/build/android/tombstones.py
new file mode 100755
index 0000000..d3af2a6
--- /dev/null
+++ b/build/android/tombstones.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Finds the most recent tombstone file(s) on all connected devices
+# and prints their stacks.
+#
+# Assumes tombstone file was created with current symbols.
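+#
+# Example (output directory illustrative):
+#   build/android/tombstones.py --output-directory out/Debug -w -s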
+
+import datetime
+import logging
+import multiprocessing
+import optparse
+import os
+import re
+import subprocess
+import sys
+
+import devil_chromium
+
+from devil.android import device_blacklist
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.utils import run_tests_helper
+from pylib import constants
+
+_TZ_UTC = {'TZ': 'UTC'}
+
+
+def _ListTombstones(device):
+  """List the tombstone files on the device.
+
+  Args:
+    device: An instance of DeviceUtils.
+
+  Yields:
+    Tuples of (tombstone filename, date time of file on device).
+  """
+  try:
+    if not device.PathExists('/data/tombstones', as_root=True):
+      return
+    # TODO(perezju): Introduce a DeviceUtils.Ls() method (crbug.com/552376).
+    lines = device.RunShellCommand(
+        ['ls', '-a', '-l', '/data/tombstones'],
+        as_root=True, check_return=True, env=_TZ_UTC)
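+    # A typical line (exact columns vary across Android releases):
+    #   -rw------- system system 65536 2016-01-01 12:00 tombstone_00
+    # so details[-3], details[-2] and details[-1] below are the date, time
+    # and filename.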
+    for line in lines:
+      if 'tombstone' in line:
+        details = line.split()
+        t = datetime.datetime.strptime(details[-3] + ' ' + details[-2],
+                                       '%Y-%m-%d %H:%M')
+        yield details[-1], t
+  except device_errors.CommandFailedError:
+    logging.exception('Could not retrieve tombstones.')
+  except device_errors.CommandTimeoutError:
+    logging.exception('Timed out retrieving tombstones.')
+
+
+def _GetDeviceDateTime(device):
+  """Determine the date time on the device.
+
+  Args:
+    device: An instance of DeviceUtils.
+
+  Returns:
+    A datetime instance.
+  """
+  device_now_string = device.RunShellCommand(
+      ['date'], check_return=True, env=_TZ_UTC)
+  return datetime.datetime.strptime(
+      device_now_string[0], '%a %b %d %H:%M:%S %Z %Y')
+
+
+def _GetTombstoneData(device, tombstone_file):
+  """Retrieve the tombstone data from the device
+
+  Args:
+    device: An instance of DeviceUtils.
+    tombstone_file: the tombstone to retrieve
+
+  Returns:
+    A list of lines
+  """
+  return device.ReadFile(
+      '/data/tombstones/' + tombstone_file, as_root=True).splitlines()
+
+
+def _EraseTombstone(device, tombstone_file):
+  """Deletes a tombstone from the device.
+
+  Args:
+    device: An instance of DeviceUtils.
+    tombstone_file: the tombstone to delete.
+  """
+  return device.RunShellCommand(
+      ['rm', '/data/tombstones/' + tombstone_file],
+      as_root=True, check_return=True)
+
+
+def _DeviceAbiToArch(device_abi):
+  # The order of this list is significant: check for the more specific match
+  # (e.g., arm64) before the less specific (e.g., arm).
+  arches = ['arm64', 'arm', 'x86_64', 'x86', 'mips']
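+  # e.g. 'arm64-v8a' matches 'arm64' before 'arm' is tried; 'armeabi-v7a'
+  # falls through to 'arm'.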
+  for arch in arches:
+    if arch in device_abi:
+      return arch
+  raise RuntimeError('Unknown device ABI: %s' % device_abi)
+
+
+def _ResolveSymbols(tombstone_data, include_stack, device_abi):
+  """Run the stack tool for given tombstone input.
+
+  Args:
+    tombstone_data: a list of strings of tombstone data.
+    include_stack: whether to include stack data in the output.
+    device_abi: the default ABI of the device which generated the tombstone.
+
+  Yields:
+    A string for each line of resolved stack output.
+  """
+  # Check whether the tombstone data has an ABI listed; if so, use it in
+  # preference to the device's default ABI.
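+  # (e.g. a line reading "ABI: 'arm'" selects the arm stack tool below.)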
+  for line in tombstone_data:
+    found_abi = re.search(r"ABI: '(.+?)'", line)
+    if found_abi:
+      device_abi = found_abi.group(1)
+  arch = _DeviceAbiToArch(device_abi)
+  if not arch:
+    return
+
+  stack_tool = os.path.join(os.path.dirname(__file__), '..', '..',
+                            'third_party', 'android_platform', 'development',
+                            'scripts', 'stack')
+  cmd = [stack_tool, '--arch', arch, '--output-directory',
+         constants.GetOutDirectory()]
+  proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+  output = proc.communicate(input='\n'.join(tombstone_data))[0]
+  for line in output.split('\n'):
+    if not include_stack and 'Stack Data:' in line:
+      break
+    yield line
+
+
+def _ResolveTombstone(tombstone):
+  lines = []
+  lines += ['%s created on %s, about this long ago: %s Device: %s' % (
+      tombstone['file'], tombstone['time'],
+      tombstone['device_now'] - tombstone['time'], tombstone['serial'])]
+  logging.info('\n'.join(lines))
+  logging.info('Resolving...')
+  lines += _ResolveSymbols(tombstone['data'], tombstone['stack'],
+                           tombstone['device_abi'])
+  return lines
+
+
+def _ResolveTombstones(jobs, tombstones):
+  """Resolve a list of tombstones.
+
+  Args:
+    jobs: the number of jobs to use with multiprocessing.
+    tombstones: a list of tombstones.
+  """
+  if not tombstones:
+    logging.warning('No tombstones to resolve.')
+    return
+  if len(tombstones) == 1:
+    data = [_ResolveTombstone(tombstones[0])]
+  else:
+    pool = multiprocessing.Pool(processes=jobs)
+    data = pool.map(_ResolveTombstone, tombstones)
+  for tombstone in data:
+    for line in tombstone:
+      logging.info(line)
+
+
+def _GetTombstonesForDevice(device, options):
+  """Returns a list of tombstones on a given device.
+
+  Args:
+    device: An instance of DeviceUtils.
+    options: command-line arguments from optparse.
+  """
+  ret = []
+  all_tombstones = list(_ListTombstones(device))
+  if not all_tombstones:
+    logging.warning('No tombstones.')
+    return ret
+
+  # Sort the tombstones in date order, most recent first.
+  all_tombstones.sort(cmp=lambda a, b: cmp(b[1], a[1]))
+
+  # Only resolve the most recent unless --all-tombstones given.
+  tombstones = all_tombstones if options.all_tombstones else [all_tombstones[0]]
+
+  device_now = _GetDeviceDateTime(device)
+  try:
+    for tombstone_file, tombstone_time in tombstones:
+      ret += [{'serial': str(device),
+               'device_abi': device.product_cpu_abi,
+               'device_now': device_now,
+               'time': tombstone_time,
+               'file': tombstone_file,
+               'stack': options.stack,
+               'data': _GetTombstoneData(device, tombstone_file)}]
+  except device_errors.CommandFailedError:
+    for line in device.RunShellCommand(
+        ['ls', '-a', '-l', '/data/tombstones'],
+        as_root=True, check_return=True, env=_TZ_UTC, timeout=60):
+      logging.info('%s: %s', str(device), line)
+    raise
+
+  # Erase all the tombstones if desired.
+  if options.wipe_tombstones:
+    for tombstone_file, _ in all_tombstones:
+      _EraseTombstone(device, tombstone_file)
+
+  return ret
+
+
+def main():
+  custom_handler = logging.StreamHandler(sys.stdout)
+  custom_handler.setFormatter(run_tests_helper.CustomFormatter())
+  logging.getLogger().addHandler(custom_handler)
+  logging.getLogger().setLevel(logging.INFO)
+
+  parser = optparse.OptionParser()
+  parser.add_option('--device',
+                    help='The serial number of the device. If not specified, '
+                         'all devices will be used.')
+  parser.add_option('--blacklist-file', help='Device blacklist JSON file.')
+  parser.add_option('-a', '--all-tombstones', action='store_true',
+                    help='Resolve symbols for all tombstones, rather than '
+                         'just the most recent.')
+  parser.add_option('-s', '--stack', action='store_true',
+                    help='Also include symbols for stack data.')
+  parser.add_option('-w', '--wipe-tombstones', action='store_true',
+                    help='Erase all tombstones from device after processing.')
+  parser.add_option('-j', '--jobs', type='int',
+                    default=4,
+                    help='Number of jobs to use when processing multiple '
+                         'crash stacks.')
+  parser.add_option('--output-directory',
+                    help='Path to the root build directory.')
+  options, _ = parser.parse_args()
+
+  devil_chromium.Initialize()
+
+  blacklist = (device_blacklist.Blacklist(options.blacklist_file)
+               if options.blacklist_file
+               else None)
+
+  if options.output_directory:
+    constants.SetOutputDirectory(options.output_directory)
+  # Do an up-front test that the output directory is known.
+  constants.CheckOutputDirectory()
+
+  if options.device:
+    devices = [device_utils.DeviceUtils(options.device)]
+  else:
+    devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
+
+  # This must be done serially because strptime can hit a race condition if
+  # used for the first time in a multithreaded environment.
+  # http://bugs.python.org/issue7980
+  tombstones = []
+  for device in devices:
+    tombstones += _GetTombstonesForDevice(device, options)
+
+  _ResolveTombstones(options.jobs, tombstones)
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/update_verification.py b/build/android/update_verification.py
new file mode 100755
index 0000000..40cb64a
--- /dev/null
+++ b/build/android/update_verification.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs semi-automated update testing on a non-rooted device.
+
+This script will help verify that app data is preserved during an update.
+To use this script, first run it with the create_app_data option.
+
+./update_verification.py create_app_data --old-apk <path> --app-data <path>
+
+The script will then install the old apk, prompt you to create some app data
+(bookmarks, etc.), and then save the app data in the path you gave it.
+
+Next, once you have some app data saved, run this script with the test_update
+option.
+
+./update_verification.py test_update --old-apk <path> --new-apk <path>
+--app-data <path>
+
+This will install the old apk, load the saved app data, install the new apk,
+and ask the user to verify that all of the app data was preserved.
+"""
+
+import argparse
+import logging
+import sys
+
+import devil_chromium
+
+from devil.android import apk_helper
+from devil.android import device_blacklist
+from devil.android import device_errors
+from devil.android import device_utils
+from devil.utils import run_tests_helper
+
+def CreateAppData(device, old_apk, app_data, package_name):
+  device.Install(old_apk)
+  raw_input('Set the application state. Once ready, press enter and '
+            'select "Backup my data" on the device.')
+  device.adb.Backup(app_data, packages=[package_name])
+  logging.critical('Application data saved to %s', app_data)
+
+def TestUpdate(device, old_apk, new_apk, app_data, package_name):
+  device.Install(old_apk)
+  device.adb.Restore(app_data)
+  # The restore command is not synchronous, so prompt the user to confirm.
+  raw_input('Select "Restore my data" on the device. Then press enter to '
+            'continue.')
+  device_path = device.GetApplicationPaths(package_name)
+  if not device_path:
+    raise Exception('Expected package %s to already be installed. '
+                    'Package name might have changed!' % package_name)
+
+  logging.info('Verifying that %s can be overinstalled.', new_apk)
+  device.adb.Install(new_apk, reinstall=True)
+  logging.critical('Successfully updated to the new apk. Please verify that '
+                   'the application data is preserved.')
+
+def main():
+  parser = argparse.ArgumentParser(
+      description='Script to do semi-automated upgrade testing.')
+  parser.add_argument('-v', '--verbose', action='count',
+                      help='Print verbose log information.')
+  parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
+  command_parsers = parser.add_subparsers(dest='command')
+
+  subparser = command_parsers.add_parser('create_app_data')
+  subparser.add_argument('--old-apk', required=True,
+                         help='Path to apk to update from.')
+  subparser.add_argument('--app-data', required=True,
+                         help='Path where the app data backup should be '
+                              'saved.')
+  subparser.add_argument('--package-name',
+                         help='Chrome apk package name.')
+
+  subparser = command_parsers.add_parser('test_update')
+  subparser.add_argument('--old-apk', required=True,
+                         help='Path to apk to update from.')
+  subparser.add_argument('--new-apk', required=True,
+                         help='Path to apk to update to.')
+  subparser.add_argument('--app-data', required=True,
+                         help='Path to where the app data backup is saved.')
+  subparser.add_argument('--package-name',
+                         help='Chrome apk package name.')
+
+  args = parser.parse_args()
+  run_tests_helper.SetLogLevel(args.verbose)
+
+  devil_chromium.Initialize()
+
+  blacklist = (device_blacklist.Blacklist(args.blacklist_file)
+               if args.blacklist_file
+               else None)
+
+  devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
+  if not devices:
+    raise device_errors.NoDevicesError()
+  device = devices[0]
+  logging.info('Using device %s for testing.', str(device))
+
+  package_name = (args.package_name if args.package_name
+                  else apk_helper.GetPackageName(args.old_apk))
+  if args.command == 'create_app_data':
+    CreateAppData(device, args.old_apk, args.app_data, package_name)
+  elif args.command == 'test_update':
+    TestUpdate(
+        device, args.old_apk, args.new_apk, args.app_data, package_name)
+  else:
+    raise Exception('Unknown test command: %s' % args.command)
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/android/v8_external_startup_data_arch_suffix.gypi b/build/android/v8_external_startup_data_arch_suffix.gypi
new file mode 100644
index 0000000..7af2443
--- /dev/null
+++ b/build/android/v8_external_startup_data_arch_suffix.gypi
@@ -0,0 +1,21 @@
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
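+# Typical use (illustrative): include this file from a target and refer to
+# '<(arch_suffix)' in file names, e.g. 'natives_blob_<(arch_suffix).bin' to
+# pick between 32- and 64-bit V8 startup data.
+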
+{
+  'variables': {
+    'arch_suffix': '<(arch_suffix)',
+    'variables': {
+      # This helps to find out if target_arch is set to something else.
+      'arch_suffix': '<(target_arch)',
+      'conditions': [
+        ['target_arch=="arm" or target_arch=="ia32" or target_arch=="mipsel"', {
+          'arch_suffix': '32',
+        }],
+        ['target_arch=="arm64" or target_arch=="x64" or target_arch=="mips64el"', {
+          'arch_suffix': '64',
+        }],
+      ],
+    }
+  }
+}
diff --git a/build/android/video_recorder.py b/build/android/video_recorder.py
new file mode 100755
index 0000000..b21759a
--- /dev/null
+++ b/build/android/video_recorder.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+import devil_chromium
+from devil.android.tools import video_recorder
+
+if __name__ == '__main__':
+  devil_chromium.Initialize()
+  sys.exit(video_recorder.main())
diff --git a/build/android/write_ordered_libraries.gypi b/build/android/write_ordered_libraries.gypi
new file mode 100644
index 0000000..1b52e71
--- /dev/null
+++ b/build/android/write_ordered_libraries.gypi
@@ -0,0 +1,43 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide a rule that
+# generates a JSON file with the list of dependent libraries needed for a
+# given shared library or executable.
+#
+# To use this, create a gyp target with the following form:
+#  {
+#    'actions': [
+#      {
+#        'variables': {
+#          'input_libraries': 'shared library or executable to process',
+#          'ordered_libraries_file': 'file to generate',
+#        },
+#        'includes': [ '../../build/android/write_ordered_libraries.gypi' ],
+#      },
+#    ],
+#  },
+#
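+# The generated file contains a single JSON list of library names, e.g.
+# ["libfoo.so", "libbar.so"] (names illustrative), in dependency order so
+# that a library's dependencies can be loaded first.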
+
+{
+  'action_name': 'ordered_libraries_<(_target_name)<(subtarget)',
+  'message': 'Writing dependency ordered libraries for <(_target_name)',
+  'variables': {
+    'input_libraries%': [],
+    'subtarget%': '',
+  },
+  'inputs': [
+    '<(DEPTH)/build/android/gyp/util/build_utils.py',
+    '<(DEPTH)/build/android/gyp/write_ordered_libraries.py',
+    '<@(input_libraries)',
+  ],
+  'outputs': [
+    '<(ordered_libraries_file)',
+  ],
+  'action': [
+    'python', '<(DEPTH)/build/android/gyp/write_ordered_libraries.py',
+    '--input-libraries=<(input_libraries)',
+    '--libraries-dir=<(SHARED_LIB_DIR),<(PRODUCT_DIR)',
+    '--readelf=<(android_readelf)',
+    '--output=<(ordered_libraries_file)',
+  ],
+}